diff --git a/Cargo.lock b/Cargo.lock index f71b4f651ee0..11ba8a461d78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,15 +36,15 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.3.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0070905b2c4a98d184c4e81025253cb192aa8a73827553f38e9410801ceb35bb" +checksum = "c2079246596c18b4a33e274ae10c0e50613f4d32a4198e09c7b93771013fed74" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", - "ahash", + "ahash 0.8.3", "base64 0.21.0", "bitflags", "brotli", @@ -58,7 +58,7 @@ dependencies = [ "http", "httparse", "httpdate", - "itoa 1.0.5", + "itoa 1.0.6", "language-tags", "local-channel", "mime", @@ -79,7 +79,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "syn 1.0.109", ] @@ -161,7 +161,7 @@ dependencies = [ "actix-service", "actix-utils", "actix-web-codegen", - "ahash", + "ahash 0.7.6", "bytes 1.4.0", "bytestring", "cfg-if 1.0.0", @@ -171,7 +171,7 @@ dependencies = [ "futures-core", "futures-util", "http", - "itoa 1.0.5", + "itoa 1.0.6", "language-tags", "log", "mime", @@ -194,8 +194,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9" dependencies = [ "actix-router", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -292,6 +292,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if 1.0.0", + "getrandom 0.2.8", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.20" @@ -357,7 +369,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.23", + "quote 1.0.26", "syn 1.0.109", ] @@ -456,12 +468,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -538,8 +549,8 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -551,12 +562,12 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -622,12 +633,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.1" @@ -667,7 +672,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.26", + "futures 0.3.27", 
"hex", "lazy_static", "num_cpus", @@ -711,8 +716,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "regex", "rustc-hash", "shlex", @@ -776,17 +781,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - [[package]] name = "blake2s_const" version = "0.6.0" @@ -808,14 +802,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "blob_purger" -version = "1.0.0" -dependencies = [ - "structopt", - "zksync_dal", -] - [[package]] name = "block-buffer" version = "0.7.3" @@ -840,9 +826,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array 0.14.6", ] @@ -959,9 +945,9 @@ checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bytestring" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7f83e57d9154148e355404702e2694463241880b939570d7c97c014da7a69a1" +checksum = "238e4886760d98c4f899360c834fa93e62cf7f721ac3c2da375cbdf4b8679aae" dependencies = [ "bytes 1.4.0", ] @@ -1015,9 +1001,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = 
"4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -1042,7 +1028,7 @@ dependencies = [ [[package]] name = "circuit_testing" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#1769d65aa592645a097af20b1739c7cacb0715c1" +source = "git+https://github.com/matter-labs/circuit_testing.git?branch=main#028864449036071cfb4e9ebe7ee4c5be59893031" dependencies = [ "bellman_ce", ] @@ -1096,8 +1082,8 @@ checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" dependencies = [ "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -1110,29 +1096,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "cloud-storage" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7602ac4363f68ac757d6b87dd5d850549a14d37489902ae639c06ecec06ad275" -dependencies = [ - "async-trait", - "base64 0.13.1", - "bytes 1.4.0", - "chrono", - "dotenv", - "futures-util", - "hex", - "jsonwebtoken", - "lazy_static", - "openssl", - "percent-encoding", - "reqwest", - "serde", - "serde_json", - "tokio", -] - [[package]] name = "cloudabi" version = "0.0.3" @@ -1196,6 +1159,12 @@ dependencies = [ "crossbeam-utils 0.8.15", ] +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "const-oid" version = "0.9.2" @@ -1464,6 +1433,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" 
+dependencies = [ + "generic-array 0.14.6", + "subtle", +] + [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1519,23 +1498,23 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", "syn 1.0.109", ] [[package]] name = "csv" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ "csv-core", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -1555,7 +1534,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "syn 1.0.109", ] @@ -1580,9 +1559,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1592,33 +1571,33 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.51", - "quote 
1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "scratch", "syn 1.0.109", ] [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -1640,8 +1619,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "strsim 0.10.0", "syn 1.0.109", ] @@ -1653,7 +1632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.23", + "quote 1.0.26", "syn 1.0.109", ] @@ -1674,8 +1653,8 @@ dependencies = [ name = "db_test_macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -1689,13 +1668,24 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", + "crypto-bigint 0.3.2", + "pem-rfc7468", +] + [[package]] name = "der" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "const-oid", + "const-oid 0.9.2", "zeroize", ] @@ -1705,8 +1695,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -1717,8 +1707,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustc_version", "syn 1.0.109", ] @@ -1747,22 +1737,11 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] -[[package]] -name = "dirs" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" -dependencies = [ - "libc", - "redox_users 0.3.5", - "winapi 0.3.9", -] - [[package]] name = "dirs" version = "3.0.2" @@ -1779,7 +1758,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", - "redox_users 0.4.3", + "redox_users", "winapi 0.3.9", ] @@ -1801,7 +1780,7 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", + "der 0.6.1", "elliptic-curve", "rfc6979", "signature", @@ -1833,13 +1812,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", - "crypto-bigint", - "der", + "crypto-bigint 0.4.9", + "der 0.6.1", "digest 0.10.6", "ff", "generic-array 0.14.6", "group", - "pkcs8", + "pkcs8 0.9.0", "rand_core 0.6.4", "sec1", "subtle", @@ -1959,26 +1938,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "events_tx_initiator_address_migration" -version = "0.1.0" -dependencies = [ - "tokio", - "zksync_dal", - "zksync_types", -] - -[[package]] -name = "expanduser" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e0b79235da57db6b6c2beed9af6e5de867d63a973ae3e91910ddc33ba40bc0" -dependencies = [ - "dirs 1.0.5", - "lazy_static", - "pwd", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -2026,8 +1985,8 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", "syn 1.0.109", ] @@ -2168,9 +2127,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -2183,9 +2142,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -2193,15 +2152,15 @@ dependencies = [ 
[[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -2222,9 +2181,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-lite" @@ -2243,26 +2202,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = 
"fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -2276,9 +2235,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2404,6 +2363,89 @@ dependencies = [ "web-sys", ] +[[package]] +name = "google-cloud-auth" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +dependencies = [ + "async-trait", + "base64 0.21.0", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "thiserror", + "time 0.3.20", + "tokio", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-default" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +dependencies = [ + "async-trait", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-storage", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +dependencies = [ + "reqwest", + "thiserror", + "tokio", +] + +[[package]] +name = "google-cloud-storage" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +dependencies = [ + "async-stream", + "base64 0.21.0", + "bytes 1.4.0", + "futures-util", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "regex", + "reqwest", + "ring", + 
"rsa", + "serde", + "serde_json", + "sha2 0.10.6", + "thiserror", + "time 0.3.20", + "tokio", + "tokio-util 0.7.7", + "tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +dependencies = [ + "async-trait", +] + [[package]] name = "governor" version = "0.4.2" @@ -2411,7 +2453,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", - "futures 0.3.26", + "futures 0.3.27", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2477,7 +2519,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -2486,7 +2528,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -2597,6 +2639,15 @@ dependencies = [ "digest 0.10.6", ] +[[package]] +name = "home" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2616,7 +2667,7 @@ checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes 1.4.0", "fnv", - "itoa 1.0.5", + "itoa 1.0.6", ] [[package]] @@ -2650,9 +2701,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes 1.4.0", "futures-channel", @@ -2663,7 +2714,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa 1.0.6", "pin-project-lite", "socket2", "tokio", @@ -2797,8 +2848,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -2823,9 +2874,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" dependencies = [ "libc", "windows-sys 0.45.0", @@ -2881,9 +2932,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" @@ -2909,7 +2960,7 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "derive_more", - "futures 0.3.26", + "futures 0.3.27", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-pubsub", "log", @@ -2923,7 +2974,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 
0.3.26", + "futures 0.3.27", "futures-executor", "futures-util", "log", @@ -2937,7 +2988,7 @@ name = "jsonrpc-core" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "futures-executor", "futures-util", "log", @@ -2951,7 +3002,7 @@ name = "jsonrpc-core-client" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "jsonrpc-client-transports", ] @@ -2961,8 +3012,8 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -2971,7 +3022,7 @@ name = "jsonrpc-http-server" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "hyper", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-server-utils", @@ -2986,7 +3037,7 @@ name = "jsonrpc-pubsub" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "lazy_static", "log", @@ -3001,7 +3052,7 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "bytes 1.4.0", - "futures 0.3.26", + "futures 0.3.27", "globset", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "lazy_static", @@ -3017,7 +3068,7 @@ name = 
"jsonrpc-ws-server" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-server-utils", "log", @@ -3124,8 +3175,8 @@ checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.1", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -3190,11 +3241,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ - "base64 0.12.3", + "base64 0.13.1", "pem", "ring", "serde", @@ -3253,6 +3304,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin", +] [[package]] name = "lazycell" @@ -3262,9 +3316,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" @@ -3329,7 +3383,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.26", + "futures 0.3.27", "hex", "num 0.3.1", "once_cell", @@ -3467,7 +3521,7 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ - "ahash", + "ahash 0.7.6", "metrics-macros", "portable-atomic", ] @@ -3497,8 +3551,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -3525,6 +3579,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3710,38 +3774,44 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.2.6" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits", + "rand 0.7.3", + "serde", ] [[package]] name = "num-bigint" -version = "0.3.3" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits", - "rand 0.7.3", "serde", ] [[package]] -name = "num-bigint" -version = "0.4.3" +name = "num-bigint-dig" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = 
"2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" dependencies = [ - "autocfg 1.1.0", + "byteorder", + "lazy_static", + "libm", "num-integer", + "num-iter", "num-traits", - "serde", + "rand 0.8.5", + "smallvec", + "zeroize", ] [[package]] @@ -3781,8 +3851,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -3906,8 +3976,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -3969,7 +4039,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.26", + "futures 0.3.27", "futures-util", "http", "opentelemetry", @@ -4073,8 +4143,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -4132,7 +4202,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.2.16", + "redox_syscall", "smallvec", "winapi 0.3.9", ] @@ -4145,7 +4215,7 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.16", + "redox_syscall", "smallvec", "windows-sys 0.45.0", ] @@ -4162,9 +4232,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -4196,13 +4266,20 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ "base64 0.13.1", - "once_cell", - "regex", +] + +[[package]] +name = "pem-rfc7468" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +dependencies = [ + "base64ct", ] [[package]] @@ -4213,9 +4290,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -4223,9 +4300,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -4233,22 +4310,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202" +checksum = 
"75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] [[package]] name = "pest_meta" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", @@ -4280,8 +4357,8 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -4297,14 +4374,36 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +dependencies = [ + "der 0.5.1", + "pkcs8 0.8.0", + "zeroize", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der 0.5.1", + "spki 0.5.4", + "zeroize", +] + [[package]] name = "pkcs8" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -4343,16 +4442,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.45.0", ] [[package]] @@ -4396,7 +4497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit 0.19.4", + "toml_edit 0.19.6", ] [[package]] @@ -4406,8 +4507,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", "version_check", ] @@ -4418,8 +4519,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "version_check", ] @@ -4440,9 +4541,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -4496,8 +4597,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -4511,16 +4612,6 @@ dependencies = [ "prost", ] -[[package]] -name = "pwd" -version = "1.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c71c0c79b9701efe4e1e4b563b2016dd4ee789eb99badcb09d61ac4b92e4a2" -dependencies = [ - "libc", - "thiserror", -] - [[package]] name = "quanta" version = "0.9.3" @@ -4564,11 +4655,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", ] [[package]] @@ -4788,9 +4879,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -4798,9 +4889,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel 0.5.7", "crossbeam-deque 0.8.3", @@ -4817,12 +4908,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - [[package]] name = "redox_syscall" version = "0.2.16" @@ -4832,17 +4917,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "redox_users" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" -dependencies = [ 
- "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", -] - [[package]] name = "redox_users" version = "0.4.3" @@ -4850,7 +4924,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.8", - "redox_syscall 0.2.16", + "redox_syscall", "thiserror", ] @@ -4910,6 +4984,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -4959,7 +5034,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] @@ -5011,15 +5086,23 @@ dependencies = [ ] [[package]] -name = "rust-argon2" -version = "0.8.3" +name = "rsa" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" dependencies = [ - "base64 0.13.1", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils 0.8.15", + "byteorder", + "digest 0.10.6", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8 0.8.0", + "rand_core 0.6.4", + "smallvec", + "subtle", + "zeroize", ] [[package]] @@ -5057,9 +5140,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", @@ -5104,15 +5187,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "salsa20" @@ -5149,9 +5232,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -5186,9 +5269,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", - "der", + "der 0.6.1", "generic-array 0.14.6", - "pkcs8", + "pkcs8 0.9.0", "subtle", "zeroize", ] @@ -5246,9 +5329,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "send_wrapper" @@ -5353,9 +5436,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -5372,23 +5455,23 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" 
+version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "indexmap", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -5400,7 +5483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -5423,20 +5506,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] -[[package]] -name = "set_correct_tx_format_for_priority_ops" -version = "0.1.0" -dependencies = [ - "tokio", - "zksync_dal", - "zksync_types", -] - [[package]] name = "sha-1" version = "0.8.2" @@ -5565,13 +5639,14 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", - "num-bigint 0.2.6", + "num-bigint 0.4.3", "num-traits", + "thiserror", + "time 0.3.20", ] [[package]] @@ -5597,9 +5672,9 @@ checksum = 
"a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", @@ -5613,7 +5688,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes 1.4.0", - "futures 0.3.26", + "futures 0.3.27", "http", "httparse", "log", @@ -5627,6 +5702,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der 0.5.1", +] + [[package]] name = "spki" version = "0.6.0" @@ -5634,7 +5719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", ] [[package]] @@ -5670,7 +5755,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aec89bfaca8f7737439bad16d52b07f1ccd0730520d3bf6ae9d069fe4b641fb1" dependencies = [ - "ahash", + "ahash 0.7.6", "atoi", "base64 0.13.1", "bigdecimal", @@ -5682,7 +5767,7 @@ dependencies = [ "crossbeam-channel 0.5.7", "crossbeam-queue 0.3.8", "crossbeam-utils 0.8.15", - "dirs 3.0.2", + "dirs", "either", "futures-channel", "futures-core", @@ -5727,8 +5812,8 @@ dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", "serde_json", "sha2 0.9.9", @@ -5755,15 +5840,6 @@ version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "storage_logs_migration" -version = "0.1.0" -dependencies = [ - "tokio", - "zksync_dal", - "zksync_types", -] - [[package]] name = "stringprep" version = "0.1.2" @@ -5805,8 +5881,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -5826,8 +5902,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustversion", "syn 1.0.109", ] @@ -5855,15 +5931,15 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "unicode-ident", ] [[package]] name = "sync_vm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "arrayvec 0.7.2", "cs_derive", @@ -5930,7 +6006,7 @@ checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.2.16", + "redox_syscall", "rustix", "windows-sys 0.42.0", ] @@ -5950,8 +6026,8 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + 
"proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -5966,21 +6042,21 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -6010,7 +6086,7 @@ version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ - "itoa 1.0.5", + "itoa 1.0.6", "serde", "time-core", "time-macros", @@ -6076,9 +6152,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", @@ -6091,7 +6167,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -6110,8 +6186,8 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 
1.0.109", ] @@ -6204,9 +6280,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" dependencies = [ "indexmap", "toml_datetime", @@ -6250,9 +6326,9 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", "prost-build", - "quote 1.0.23", + "quote 1.0.26", "syn 1.0.109", ] @@ -6307,8 +6383,8 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", ] @@ -6439,15 +6515,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -6513,6 +6589,12 @@ dependencies = [ "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" + [[package]] name = "uuid" version = "1.3.0" @@ -6661,8 
+6743,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", "wasm-bindgen-shared", ] @@ -6685,7 +6767,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "wasm-bindgen-macro-support", ] @@ -6695,8 +6777,8 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -6743,7 +6825,7 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures 0.3.26", + "futures 0.3.27", "futures-timer", "headers", "hex", @@ -6781,15 +6863,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -6803,9 +6876,9 @@ dependencies = [ [[package]] name = "whoami" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45dbc71f0cdca27dc261a9bd37ddec174e4a0af2b900b890f378460f745426e3" +checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68" dependencies = [ "wasm-bindgen", "web-sys", @@ -6880,9 +6953,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" 
dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -6895,51 +6968,51 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" 
+version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winnow" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf09497b8f8b5ac5d3bb4d05c0a99be20f26fd3d5f2db7b0716e946d5103658" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" dependencies = [ "memchr", ] @@ -6978,7 +7051,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=main#ca203a25cbff50ad623630e393dd041aca58038d" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" dependencies = [ "blake2 0.10.6", "k256", @@ -6994,8 +7067,8 @@ dependencies = [ [[package]] name = "zkevm-assembly" -version = "1.3.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=main#b995edca9b3e263f0d989f3c8c31eed1450fe3fc" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" dependencies = [ "env_logger 0.9.3", "hex", @@ -7005,6 +7078,7 @@ dependencies = [ "num-bigint 0.4.3", "num-traits", "regex", + "sha3 0.10.6", "smallvec", "structopt", "thiserror", @@ -7014,7 +7088,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=main#b9fd187b477358465b7e332f72e5bebfe64f02b8" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" dependencies = [ "bitflags", "ethereum-types", @@ -7025,7 +7099,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = 
"1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=main#e4e6aaf78b45280ec99057ba45a393776d8e45a2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" dependencies = [ "bincode", "blake2 0.10.6", @@ -7054,7 +7128,7 @@ dependencies = [ [[package]] name = "zksync" -version = "0.3.0" +version = "0.4.0" dependencies = [ "anyhow", "hex", @@ -7084,7 +7158,7 @@ version = "1.0.0" dependencies = [ "async-trait", "convert_case 0.6.0", - "futures 0.3.26", + "futures 0.3.27", "hex", "serde", "serde_json", @@ -7121,7 +7195,7 @@ dependencies = [ "chrono", "ctrlc", "ethabi", - "futures 0.3.26", + "futures 0.3.27", "hex", "lazy_static", "metrics", @@ -7146,6 +7220,7 @@ dependencies = [ "ethabi", "hex", "once_cell", + "serde", "serde_json", "zksync_utils", ] @@ -7166,7 +7241,7 @@ dependencies = [ "chrono", "ctrlc", "db_test_macro", - "futures 0.3.26", + "futures 0.3.27", "governor", "hex", "itertools", @@ -7284,7 +7359,7 @@ dependencies = [ "actix-rt", "actix-web", "async-trait", - "futures 0.3.26", + "futures 0.3.27", "hex", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto", @@ -7318,7 +7393,7 @@ dependencies = [ "byteorder", "criterion", "fnv", - "futures 0.3.26", + "futures 0.3.27", "itertools", "metrics", "once_cell", @@ -7351,8 +7426,10 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ - "cloud-storage", - "expanduser", + "google-cloud-auth", + "google-cloud-default", + "google-cloud-storage", + "http", "metrics", "tempdir", "tokio", @@ -7368,6 +7445,8 @@ dependencies = [ "metrics", "reqwest", "vlog", + "zksync_config", + "zksync_utils", ] [[package]] @@ -7423,7 +7502,7 @@ name = "zksync_testkit" version = "1.0.0" dependencies = [ "anyhow", - "futures 0.3.26", + "futures 0.3.27", "num 0.3.1", "once_cell", "rand 0.7.3", @@ -7491,9 +7570,11 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - 
"futures 0.3.26", + "futures 0.3.27", "hex", + "itertools", "num 0.3.1", + "reqwest", "serde", "serde_json", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index 97b46e9d50af..cf6bd9c1f6b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,12 +14,6 @@ members = [ # "core/bin/setup_key_generator_and_server", # Verification key generator and server "core/bin/verification_key_generator_and_server", - # Migration to fill events.tx_initiator_address table for old events - "core/bin/events_tx_initiator_address_migration", - "core/bin/set_correct_tx_format_for_priority_ops", - "core/bin/storage_logs_migration", - # Tool for removing blobs from database - "core/bin/blob_purger", # circuit synthesizer: its commented as it cannot be built with stable rust. # "core/bin/circuit_synthesizer", # Libraries @@ -56,7 +50,7 @@ members = [ ] resolver = "2" -exclude = [ "core/bin/prover", "core/bin/setup_key_generator_and_server", "core/bin/circuit_synthesizer"] +exclude = [ "core/bin/prover", "core/bin/circuit_synthesizer", "core/bin/setup_key_generator_and_server"] [profile.test.package.zksync_merkle_tree] opt-level = 3 diff --git a/bin/ci_run b/bin/ci_run index ea9604acdb2e..b76fce10ac70 100755 --- a/bin/ci_run +++ b/bin/ci_run @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Runs the command from within CI docker-compose environment. 
cd $ZKSYNC_HOME diff --git a/bin/run_loadtest_from_github_actions b/bin/run_loadtest_from_github_actions index ee7b024b284a..78811f21bc56 100755 --- a/bin/run_loadtest_from_github_actions +++ b/bin/run_loadtest_from_github_actions @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Prepare environments IFS=',' diff --git a/bin/zk b/bin/zk index a6f51fd2c6dd..d0840cb8d500 100755 --- a/bin/zk +++ b/bin/zk @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash if [ -z "$1" ]; then cd $ZKSYNC_HOME diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md new file mode 100644 index 000000000000..0ce92e5d12cd --- /dev/null +++ b/core/CHANGELOG.md @@ -0,0 +1,95 @@ +# Changelog + +## [3.0.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.11.1...v3.0.0) (2023-03-22) + + +### âš  BREAKING CHANGES + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) + +### Features + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) ([d28e01c](https://github.com/matter-labs/zksync-2-dev/commit/d28e01ce0fbf0129c2cbba877efe65da7f7ed367)) +* env var for state keeper to finish l1 batch and stop ([#1538](https://github.com/matter-labs/zksync-2-dev/issues/1538)) ([eaa0cce](https://github.com/matter-labs/zksync-2-dev/commit/eaa0cce81e683bd10b1c85b06bc04a7de578e02e)) +* **external node:** Implement transaction proxy ([#1534](https://github.com/matter-labs/zksync-2-dev/issues/1534)) ([19b6a85](https://github.com/matter-labs/zksync-2-dev/commit/19b6a8595e5e8e8399bacf6e2308e553d567a2b5)) +* **external node:** Sync layer implementation ([#1525](https://github.com/matter-labs/zksync-2-dev/issues/1525)) ([47b9a1d](https://github.com/matter-labs/zksync-2-dev/commit/47b9a1d30cc87f7128ef29eb5d0851276d71b7d1)) +* **prover-generalized:** added a generalized prover-group for integration test ([#1526](https://github.com/matter-labs/zksync-2-dev/issues/1526)) 
([f921886](https://github.com/matter-labs/zksync-2-dev/commit/f9218866cd790975b7f97be6a4a59192a1da8b3a)) +* **vm:** vm memory metrics ([#1564](https://github.com/matter-labs/zksync-2-dev/issues/1564)) ([ee45d47](https://github.com/matter-labs/zksync-2-dev/commit/ee45d477e6c393277923bfc64226ea03290a01a0)) + + +### Bug Fixes + +* **witness-generator:** Fix witness generation for storage application circuit ([#1568](https://github.com/matter-labs/zksync-2-dev/issues/1568)) ([5268ac4](https://github.com/matter-labs/zksync-2-dev/commit/5268ac4558aea7c2ac72bdfc6c57afd25eff1e8c)) + + +### Reverts + +* env var for state keeper to finish l1 batch and stop ([#1545](https://github.com/matter-labs/zksync-2-dev/issues/1545)) ([94701bd](https://github.com/matter-labs/zksync-2-dev/commit/94701bd2fbc590f733346934cfbccae08fc62f1a)) + +## [2.11.1](https://github.com/matter-labs/zksync-2-dev/compare/v2.11.0...v2.11.1) (2023-03-16) + + +### Bug Fixes + +* **witness-generator:** perform sampling only for basic circuit ([#1535](https://github.com/matter-labs/zksync-2-dev/issues/1535)) ([76c3248](https://github.com/matter-labs/zksync-2-dev/commit/76c324883dd7b5026f01add61bef637b2e1c0c5b)) + +## [2.11.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.10.0...v2.11.0) (2023-03-15) + + +### Features + +* Make server compatible with new SDK ([#1532](https://github.com/matter-labs/zksync-2-dev/issues/1532)) ([1c52738](https://github.com/matter-labs/zksync-2-dev/commit/1c527382d1e36c04df90bdf71fe643db724acb48)) + +## [2.10.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.9.0...v2.10.0) (2023-03-14) + + +### Features + +* **explorer api:** L1 batch endpoints ([#1529](https://github.com/matter-labs/zksync-2-dev/issues/1529)) ([f06c95d](https://github.com/matter-labs/zksync-2-dev/commit/f06c95defd79aaea24a3f317236fac537dee63c5)) +* **simpler-sampling:** simplify witness-generator sampling using proof % ([#1514](https://github.com/matter-labs/zksync-2-dev/issues/1514)) 
([b4378ac](https://github.com/matter-labs/zksync-2-dev/commit/b4378ac2524f2ca936ee5d53351c7596526ea714)) +* **vm:** limit validation gas ([#1513](https://github.com/matter-labs/zksync-2-dev/issues/1513)) ([09c9afa](https://github.com/matter-labs/zksync-2-dev/commit/09c9afaf0ebe11c513c6779b7c585e75fde80e09)) +* **workload identity support:** Refactor GCS to add workload identity support ([#1503](https://github.com/matter-labs/zksync-2-dev/issues/1503)) ([1880931](https://github.com/matter-labs/zksync-2-dev/commit/188093185241180c54e4edcbc95fb068d890c0e5)) + + +### Bug Fixes + +* **circuit-upgrade:** upgrade circuit to fix synthesizer issue ([#1530](https://github.com/matter-labs/zksync-2-dev/issues/1530)) ([368eeb5](https://github.com/matter-labs/zksync-2-dev/commit/368eeb58b027a3b2c7fe6491d3d17306921d8265)) +* **prover:** query for hanged gpu proofs ([#1522](https://github.com/matter-labs/zksync-2-dev/issues/1522)) ([3c4b597](https://github.com/matter-labs/zksync-2-dev/commit/3c4b597c2637dd6adaa77f0a52a7e7ada1d52918)) +* **synthesizer-alerting:** add sentry_guard variable ([#1524](https://github.com/matter-labs/zksync-2-dev/issues/1524)) ([ced5107](https://github.com/matter-labs/zksync-2-dev/commit/ced51079665a1e64b56f1e712473be90e9a38cb1)) +* **witness-generator:** update logic while persist status in db to prevent race ([#1507](https://github.com/matter-labs/zksync-2-dev/issues/1507)) ([9c295c4](https://github.com/matter-labs/zksync-2-dev/commit/9c295c42ce1e725134f1b610f32e55163e6da349)) + +## [2.9.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.8.0...v2.9.0) (2023-03-09) + + +### Features + +* **external node:** Sync protocol: API changes & fetcher skeleton ([#1498](https://github.com/matter-labs/zksync-2-dev/issues/1498)) ([05da6a8](https://github.com/matter-labs/zksync-2-dev/commit/05da6a857b6d9faa9ba50183272feacc12518482)) +* integrate yul contracts into the server ([#1506](https://github.com/matter-labs/zksync-2-dev/issues/1506)) 
([c542c29](https://github.com/matter-labs/zksync-2-dev/commit/c542c2969f72996ab874bd089f096cd123c926a4)) + + +### Bug Fixes + +* abi encoded message length ([#1516](https://github.com/matter-labs/zksync-2-dev/issues/1516)) ([65766ee](https://github.com/matter-labs/zksync-2-dev/commit/65766ee12fb6ab27382c378334dc7176dc233d26)) +* **state-keeper:** Save correct value after executing miniblock ([#1511](https://github.com/matter-labs/zksync-2-dev/issues/1511)) ([5decdda](https://github.com/matter-labs/zksync-2-dev/commit/5decdda60b8880d0ada86f402f2f270572c45601)) +* **witness-generator:** increase limit from 155K to 16M while expanding bootloader ([#1515](https://github.com/matter-labs/zksync-2-dev/issues/1515)) ([05711de](https://github.com/matter-labs/zksync-2-dev/commit/05711de1317edb094cbcf375a9dc75e35662a7a7)) + +## [2.8.0](https://github.com/matter-labs/zksync-2-dev/compare/v2.7.15...v2.8.0) (2023-03-06) + + +### Features + +* **api:** add Geth API errors to our codebase that are not present yet ([#1440](https://github.com/matter-labs/zksync-2-dev/issues/1440)) ([f6cefdd](https://github.com/matter-labs/zksync-2-dev/commit/f6cefdd21083301fce5fa665aa79ceb307b3cc49)) +* **house-keeper:** emit prover queued jobs for each group type ([#1480](https://github.com/matter-labs/zksync-2-dev/issues/1480)) ([ab6d7c4](https://github.com/matter-labs/zksync-2-dev/commit/ab6d7c431ac64619571e227a6680f0552aa7b1ee)) +* **house-keeper:** increase blob cleanup time from 2days to 30 ([8a7ee85](https://github.com/matter-labs/zksync-2-dev/commit/8a7ee8548a7c24235549f714d8668396ab05f026)) +* **house-keeper:** increase blob cleanup time from 2days to 30 ([#1485](https://github.com/matter-labs/zksync-2-dev/issues/1485)) ([8a7ee85](https://github.com/matter-labs/zksync-2-dev/commit/8a7ee8548a7c24235549f714d8668396ab05f026)) +* **state keeper:** precise calculation of initial/repeated writes ([#1486](https://github.com/matter-labs/zksync-2-dev/issues/1486)) 
([15ae673](https://github.com/matter-labs/zksync-2-dev/commit/15ae673da09eda47566ef11ea10d7c262d44e272)) +* **vm:** add a few assert to memory impl ([#1476](https://github.com/matter-labs/zksync-2-dev/issues/1476)) ([dfff514](https://github.com/matter-labs/zksync-2-dev/commit/dfff514703ef48eb7a1026f3e9f0ee4c5e9af2f6)) +* **witness-generator:** added last_l1_batch_to_process param for smoo… ([#1477](https://github.com/matter-labs/zksync-2-dev/issues/1477)) ([5d46505](https://github.com/matter-labs/zksync-2-dev/commit/5d4650564799c6e7f22b5fc5cc43ae484eb7f849)) + + +### Bug Fixes + +* **api:** fix tx count query ([#1494](https://github.com/matter-labs/zksync-2-dev/issues/1494)) ([fc5c61b](https://github.com/matter-labs/zksync-2-dev/commit/fc5c61bd65772ea9d4b129a1a8e22a0ab9494aba)) +* **circuits:** update circuits+vk for invalid memory access issue ([#1496](https://github.com/matter-labs/zksync-2-dev/issues/1496)) ([d84a73a](https://github.com/matter-labs/zksync-2-dev/commit/d84a73a3b54688f808be590e13fc4995666e3068)) +* **db:** create index to reduce load from prover_jobs table ([#1251](https://github.com/matter-labs/zksync-2-dev/issues/1251)) ([500f03a](https://github.com/matter-labs/zksync-2-dev/commit/500f03ac753f243e6e525639bc02e28987dcc7dd)) +* **gas_adjuster:** Sub 1 from the last block number for fetching base_fee_history ([#1483](https://github.com/matter-labs/zksync-2-dev/issues/1483)) ([0af2f42](https://github.com/matter-labs/zksync-2-dev/commit/0af2f42b8c7c4635a18af01250213390c2424de9)) diff --git a/core/bin/blob_purger/Cargo.toml b/core/bin/blob_purger/Cargo.toml deleted file mode 100644 index 20fae4cfdd2c..000000000000 --- a/core/bin/blob_purger/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "blob_purger" -version = "1.0.0" -edition = "2018" -authors = ["The Matter Labs Team "] -homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" -keywords = ["blockchain", "zksync"] -categories = 
["cryptography"] - - -[[bin]] -name = "zksync_blob_purger" -path = "src/main.rs" - -[dependencies] -zksync_dal = { path = "../../lib/dal", version = "1.0" } -structopt = "0.3.26" diff --git a/core/bin/blob_purger/src/main.rs b/core/bin/blob_purger/src/main.rs deleted file mode 100644 index 2cb9d1f79092..000000000000 --- a/core/bin/blob_purger/src/main.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::str::FromStr; -use std::thread::sleep; -use std::time::Duration; - -use structopt::StructOpt; - -use zksync_dal::ConnectionPool; -const WAIT_TIME_MILLI_SECONDS: u64 = 2500; - -#[derive(Debug)] -enum BlobTable { - WitnessInputs, - LeafAggregationWitnessJobs, - NodeAggregationWitnessJobs, - SchedulerWitnessJobs, - ProverJobs, -} - -impl FromStr for BlobTable { - type Err = String; - fn from_str(table_name: &str) -> Result { - match table_name { - "witness_inputs" => Ok(BlobTable::WitnessInputs), - "leaf_aggregation_witness_jobs" => Ok(BlobTable::LeafAggregationWitnessJobs), - "node_aggregation_witness_jobs" => Ok(BlobTable::NodeAggregationWitnessJobs), - "scheduler_witness_jobs" => Ok(BlobTable::SchedulerWitnessJobs), - "prover_jobs" => Ok(BlobTable::ProverJobs), - _ => Err("Could not parse table name".to_string()), - } - } -} - -#[derive(Debug, StructOpt)] -#[structopt( - name = "Tool for purging blob from database", - about = "Tool to delete blob for individual tables from db" -)] -struct Opt { - /// Name of the table from which blobs would be deleted. - #[structopt(short = "t", long = "table_name", default_value = "witness_inputs")] - table: BlobTable, - /// Number of blobs purged in each batch. 
- #[structopt(short = "b", long = "batch_size", default_value = "20")] - batch_size: u8, -} - -fn purge_witness_inputs(pool: ConnectionPool, batch_size: u8) -> bool { - let l1_batches = pool - .access_storage_blocking() - .blocks_dal() - .get_l1_batches_with_blobs_in_db(batch_size); - if l1_batches.is_empty() { - return false; - } - println!("purging witness_inputs: {:?}", l1_batches); - pool.access_storage_blocking() - .blocks_dal() - .purge_blobs_from_db(l1_batches); - true -} - -fn purge_leaf_aggregation_witness_jobs(pool: ConnectionPool, batch_size: u8) -> bool { - let l1_batches = pool - .access_storage_blocking() - .witness_generator_dal() - .get_leaf_aggregation_l1_batches_with_blobs_in_db(batch_size); - if l1_batches.is_empty() { - return false; - } - println!("purging leaf_aggregation_witness_jobs: {:?}", l1_batches); - pool.access_storage_blocking() - .witness_generator_dal() - .purge_leaf_aggregation_blobs_from_db(l1_batches); - true -} - -fn purge_node_aggregation_witness_jobs(pool: ConnectionPool, batch_size: u8) -> bool { - let l1_batches = pool - .access_storage_blocking() - .witness_generator_dal() - .get_node_aggregation_l1_batches_with_blobs_in_db(batch_size); - if l1_batches.is_empty() { - return false; - } - println!("purging node_aggregation_witness_jobs: {:?}", l1_batches); - pool.access_storage_blocking() - .witness_generator_dal() - .purge_node_aggregation_blobs_from_db(l1_batches); - true -} - -fn purge_scheduler_witness_jobs(pool: ConnectionPool, batch_size: u8) -> bool { - let l1_batches = pool - .access_storage_blocking() - .witness_generator_dal() - .get_scheduler_l1_batches_with_blobs_in_db(batch_size); - if l1_batches.is_empty() { - return false; - } - println!("purging scheduler_witness_jobs: {:?}", l1_batches); - pool.access_storage_blocking() - .witness_generator_dal() - .purge_scheduler_blobs_from_db(l1_batches); - true -} - -fn purge_prover_jobs(pool: ConnectionPool, batch_size: u8) -> bool { - let job_ids = pool - 
.access_storage_blocking() - .prover_dal() - .get_l1_batches_with_blobs_in_db(batch_size); - if job_ids.is_empty() { - return false; - } - println!("purging prover_jobs: {:?}", job_ids); - pool.access_storage_blocking() - .prover_dal() - .purge_blobs_from_db(job_ids); - true -} - -fn main() { - let opt = Opt::from_args(); - println!("processing table: {:?}", opt.table); - let pool = ConnectionPool::new(Some(1), true); - let mut shall_purge = true; - while shall_purge { - shall_purge = match opt.table { - BlobTable::WitnessInputs => purge_witness_inputs(pool.clone(), opt.batch_size), - BlobTable::LeafAggregationWitnessJobs => { - purge_leaf_aggregation_witness_jobs(pool.clone(), opt.batch_size) - } - BlobTable::NodeAggregationWitnessJobs => { - purge_node_aggregation_witness_jobs(pool.clone(), opt.batch_size) - } - BlobTable::SchedulerWitnessJobs => { - purge_scheduler_witness_jobs(pool.clone(), opt.batch_size) - } - BlobTable::ProverJobs => purge_prover_jobs(pool.clone(), opt.batch_size), - }; - sleep(Duration::from_millis(WAIT_TIME_MILLI_SECONDS)); - } -} diff --git a/core/bin/circuit_synthesizer/Cargo.lock b/core/bin/circuit_synthesizer/Cargo.lock index 43e05c860621..8dedc1c8dd00 100644 --- a/core/bin/circuit_synthesizer/Cargo.lock +++ b/core/bin/circuit_synthesizer/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -143,8 +143,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 
1.0.109", ] [[package]] @@ -236,12 +236,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -285,7 +284,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "futures-channel", "futures-core", "futures-io", @@ -303,23 +302,24 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -330,13 +330,13 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", 
] [[package]] @@ -401,12 +401,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.1" @@ -421,9 +415,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beef" @@ -446,7 +440,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.26", + "futures 0.3.27", "hex", "lazy_static", "num_cpus", @@ -490,8 +484,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "regex", "rustc-hash", "shlex", @@ -588,9 +582,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -627,9 +621,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f0778972c64420fdedc63f09919c8a88bda7b25135357fd25a5d9f3257e832" +checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" dependencies = [ "memchr", "serde", @@ -702,9 +696,9 @@ checksum = 
"baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -729,16 +723,16 @@ dependencies = [ [[package]] name = "circuit_testing" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#1769d65aa592645a097af20b1739c7cacb0715c1" +source = "git+https://github.com/matter-labs/circuit_testing.git?branch=main#028864449036071cfb4e9ebe7ee4c5be59893031" dependencies = [ "bellman_ce", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", @@ -760,29 +754,6 @@ dependencies = [ "vec_map", ] -[[package]] -name = "cloud-storage" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7602ac4363f68ac757d6b87dd5d850549a14d37489902ae639c06ecec06ad275" -dependencies = [ - "async-trait", - "base64 0.13.1", - "bytes", - "chrono", - "dotenv", - "futures-util", - "hex", - "jsonwebtoken", - "lazy_static", - "openssl", - "percent-encoding", - "reqwest", - "serde", - "serde_json", - "tokio", -] - [[package]] name = "cloudabi" version = "0.0.3" @@ -795,7 +766,7 @@ dependencies = [ [[package]] name = "codegen" version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#4fb6397f778a580c9207ec23661228f5da7e66b4" +source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" dependencies = [ 
"ethereum-types", "franklin-crypto", @@ -833,14 +804,20 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] name = "const-oid" -version = "0.9.1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + +[[package]] +name = "const-oid" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -915,11 +892,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-epoch 0.9.13", + "crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-epoch 0.9.14", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -934,12 +911,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -955,13 +932,13 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = 
"ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.13", - "crossbeam-utils 0.8.14", + "crossbeam-epoch 0.9.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -981,14 +958,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", - "memoffset 0.7.1", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -1010,7 +987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -1026,9 +1003,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -1039,6 +1016,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1094,13 +1081,13 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1109,8 +1096,8 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1134,9 +1121,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1146,34 +1133,34 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "scratch", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.92" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1194,10 +1181,10 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "strsim 0.10.0", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1207,8 +1194,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1221,13 +1208,24 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", + "crypto-bigint 0.3.2", + "pem-rfc7468", +] + [[package]] name = "der" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "const-oid", + "const-oid 0.9.2", "zeroize", ] @@ -1237,9 +1235,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1249,10 +1247,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustc_version", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1270,7 +1268,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1307,7 +1305,7 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", + "der 0.6.1", "elliptic-curve", "rfc6979", "signature", @@ -1339,13 +1337,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", - "crypto-bigint", - "der", + "crypto-bigint 0.4.9", + "der 0.6.1", "digest 0.10.6", "ff", "generic-array", "group", - "pkcs8", + "pkcs8 0.9.0", "rand_core 0.6.4", "sec1", "subtle", @@ -1467,9 +1465,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1506,10 +1504,22 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", +] + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", ] [[package]] @@ -1610,9 +1620,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -1625,9 +1635,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -1635,15 +1645,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -1664,9 +1674,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" 
[[package]] name = "futures-lite" @@ -1685,26 +1695,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -1718,9 +1728,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1769,9 +1779,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" @@ -1837,6 +1847,89 @@ dependencies = [ "web-sys", ] +[[package]] +name = "google-cloud-auth" +version = "0.9.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +dependencies = [ + "async-trait", + "base64 0.21.0", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "thiserror", + "time 0.3.20", + "tokio", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-default" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +dependencies = [ + "async-trait", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-storage", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +dependencies = [ + "reqwest", + "thiserror", + "tokio", +] + +[[package]] +name = "google-cloud-storage" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +dependencies = [ + "async-stream", + "base64 0.21.0", + "bytes", + "futures-util", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "regex", + "reqwest", + "ring", + "rsa", + "serde", + "serde_json", + "sha2 0.10.6", + "thiserror", + "time 0.3.20", + "tokio", + "tokio-util 0.7.7", + "tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +dependencies = [ + "async-trait", +] + [[package]] name = "group" version = "0.12.1" @@ -1850,9 +1943,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -1863,7 +1956,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tracing", ] @@ -2007,6 +2100,15 @@ dependencies = [ "digest 0.10.6", ] +[[package]] +name = "home" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +dependencies = [ + "winapi", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2020,13 +2122,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", - "itoa 1.0.5", + "itoa 1.0.6", ] [[package]] @@ -2060,9 +2162,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -2073,7 +2175,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa 1.0.6", "pin-project-lite", "socket2", "tokio", @@ -2207,9 +2309,9 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2233,9 +2335,9 @@ 
dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" dependencies = [ "libc", "windows-sys 0.45.0", @@ -2255,9 +2357,9 @@ checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", @@ -2282,15 +2384,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -2310,7 +2412,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "futures-executor", "futures-util", "log", @@ -2356,7 +2458,7 @@ dependencies = [ "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tracing", "webpki-roots", ] @@ -2417,9 +2519,9 @@ checksum = 
"baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.1", "proc-macro-crate", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2439,7 +2541,7 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tower", "tracing", ] @@ -2483,11 +2585,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ - "base64 0.12.3", + "base64 0.13.1", "pem", "ring", "serde", @@ -2530,6 +2632,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin", +] [[package]] name = "lazycell" @@ -2539,9 +2644,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" @@ -2553,6 +2658,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + [[package]] name = "librocksdb-sys" version = "0.6.1+6.28.2" @@ -2689,9 +2800,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -2732,9 +2843,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2743,8 +2854,8 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ - "crossbeam-epoch 0.9.13", - "crossbeam-utils 0.8.14", + "crossbeam-epoch 0.9.14", + "crossbeam-utils 0.8.15", "hashbrown 0.12.3", "metrics", "num_cpus", @@ -2760,6 +2871,16 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2777,14 +2898,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2849,15 +2970,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nom8" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2898,20 +3010,21 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.2.6" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits", + "serde", ] [[package]] name = "num-bigint" -version = "0.3.3" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg 1.1.0", "num-integer", @@ -2920,15 +3033,20 @@ dependencies = [ ] [[package]] -name = "num-bigint" -version = "0.4.3" +name = "num-bigint-dig" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" dependencies = [ - "autocfg 1.1.0", + "byteorder", + "lazy_static", + "libm", "num-integer", + "num-iter", "num-traits", - "serde", + "rand 0.8.5", + "smallvec", + "zeroize", ] [[package]] @@ -2967,9 +3085,9 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3025,6 +3143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg 1.1.0", + "libm", 
] [[package]] @@ -3048,9 +3167,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -3079,9 +3198,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3110,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.7", "futures-channel", "futures-executor", "futures-util", @@ -3142,7 +3261,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.26", + "futures 0.3.27", "futures-util", "http", "opentelemetry", @@ -3240,9 +3359,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3311,9 +3430,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ 
-3345,13 +3464,20 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ "base64 0.13.1", - "once_cell", - "regex", +] + +[[package]] +name = "pem-rfc7468" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +dependencies = [ + "base64ct", ] [[package]] @@ -3362,9 +3488,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -3372,9 +3498,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -3382,22 +3508,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] 
[[package]] name = "pest_meta" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", @@ -3429,9 +3555,9 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3446,14 +3572,36 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +dependencies = [ + "der 0.5.1", + "pkcs8 0.8.0", + "zeroize", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der 0.5.1", + "spki 0.5.4", + "zeroize", +] + [[package]] name = "pkcs8" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -3464,16 +3612,18 @@ checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = 
"7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.45.0", ] [[package]] @@ -3503,9 +3653,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", "toml_edit", @@ -3518,9 +3668,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "version_check", ] @@ -3530,8 +3680,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "version_check", ] @@ -3552,9 +3702,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -3608,9 +3758,9 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3626,11 +3776,11 @@ dependencies = [ [[package]] name = 
"prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "log", "num_cpus", "rand 0.4.6", @@ -3645,7 +3795,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "libc", "mach", "once_cell", @@ -3672,11 +3822,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", ] [[package]] @@ -3875,18 +4025,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.1" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307f7aacdbab3f0adee67d52739a1d71112cc068d6fab169ddeb18e48877fad" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -3894,13 +4044,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-utils 0.8.14", + "crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -3959,15 +4109,6 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.14" @@ -3989,6 +4130,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -4001,7 +4143,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tower-service", "url", "wasm-bindgen", @@ -4038,7 +4180,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] @@ -4089,6 +4231,26 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rsa" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" +dependencies = [ + "byteorder", + "digest 0.10.6", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8 0.8.0", + "rand_core 0.6.4", + "smallvec", + "subtle", + "zeroize", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -4124,9 +4286,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = 
"0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", @@ -4171,15 +4333,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "salsa20" @@ -4207,9 +4369,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -4244,9 +4406,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", - "der", + "der 0.6.1", "generic-array", - "pkcs8", + "pkcs8 0.9.0", "subtle", "zeroize", ] @@ -4304,9 +4466,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "send_wrapper" @@ 
-4316,9 +4478,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "sentry" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" +checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" dependencies = [ "httpdate", "native-tls", @@ -4326,6 +4488,7 @@ dependencies = [ "sentry-backtrace", "sentry-contexts", "sentry-core", + "sentry-debug-images", "sentry-panic", "tokio", "ureq", @@ -4333,9 +4496,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" +checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" dependencies = [ "backtrace", "once_cell", @@ -4345,9 +4508,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" +checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" dependencies = [ "hostname", "libc", @@ -4359,9 +4522,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" +checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" dependencies = [ "once_cell", "rand 0.8.5", @@ -4370,11 +4533,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "sentry-debug-images" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +dependencies 
= [ + "findshlibs", + "once_cell", + "sentry-core", +] + [[package]] name = "sentry-panic" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" +checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4382,9 +4556,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" +checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" dependencies = [ "debugid", "getrandom 0.2.8", @@ -4392,39 +4566,39 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.17", + "time 0.3.20", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ 
"indexmap", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -4436,7 +4610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -4458,9 +4632,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4550,9 +4724,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -4560,9 +4734,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -4579,13 +4753,14 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", - "num-bigint 0.2.6", + "num-bigint 0.4.3", "num-traits", + "thiserror", + "time 0.3.20", ] [[package]] @@ -4596,9 +4771,9 @@ checksum = 
"ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -4611,9 +4786,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -4627,7 +4802,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.26", + "futures 0.3.27", "http", "httparse", "log", @@ -4641,6 +4816,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der 0.5.1", +] + [[package]] name = "spki" version = "0.6.0" @@ -4648,7 +4833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", ] [[package]] @@ -4693,9 +4878,9 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.7", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "dirs", "either", "futures-channel", @@ -4741,14 +4926,14 @@ 
dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", "serde_json", "sha2 0.9.9", "sqlx-core", "sqlx-rt", - "syn 1.0.107", + "syn 1.0.109", "url", ] @@ -4810,9 +4995,9 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4831,10 +5016,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustversion", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -4856,19 +5041,19 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "unicode-ident", ] [[package]] name = "sync_vm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "arrayvec 0.7.2", "cs_derive", @@ -4900,16 +5085,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = 
"af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys 0.42.0", ] [[package]] @@ -4927,9 +5111,9 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4943,30 +5127,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -4982,11 +5167,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = 
"cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ - "itoa 1.0.5", + "itoa 1.0.6", "serde", "time-core", "time-macros", @@ -5000,9 +5185,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -5042,9 +5227,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg 1.1.0", "bytes", @@ -5057,7 +5242,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -5076,9 +5261,9 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5104,9 +5289,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -5129,9 +5314,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.6" +version = "0.7.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6a3b08b64e6dfad376fa2432c7b1f01522e37a623c3050bc95db2d3ff21583" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -5144,19 +5329,19 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" [[package]] name = "toml_edit" -version = "0.18.1" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" dependencies = [ "indexmap", - "nom8", "toml_datetime", + "winnow", ] [[package]] @@ -5196,10 +5381,10 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", "prost-build", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5216,7 +5401,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tower-layer", "tower-service", "tracing", @@ -5253,9 +5438,9 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5328,7 +5513,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.17", + "time 0.3.20", "tracing", "tracing-core", "tracing-log", @@ -5374,17 +5559,26 @@ 
dependencies = [ "libc", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -5450,6 +5644,12 @@ dependencies = [ "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" + [[package]] name = "uuid" version = "1.3.0" @@ -5584,9 +5784,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -5608,7 +5808,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "wasm-bindgen-macro-support", ] @@ -5618,9 +5818,9 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", 
"wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5666,7 +5866,7 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures 0.3.26", + "futures 0.3.27", "futures-timer", "headers", "hex", @@ -5704,15 +5904,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -5726,9 +5917,9 @@ dependencies = [ [[package]] name = "whoami" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45dbc71f0cdca27dc261a9bd37ddec174e4a0af2b900b890f378460f745426e3" +checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68" dependencies = [ "wasm-bindgen", "web-sys", @@ -5791,9 +5982,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -5806,45 +5997,54 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = 
"windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -5870,7 +6070,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" version = "1.3.1" -source = 
"git+https://github.com/matter-labs/era-zk_evm.git?branch=main#ca203a25cbff50ad623630e393dd041aca58038d" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" dependencies = [ "blake2 0.10.6", "k256", @@ -5886,8 +6086,8 @@ dependencies = [ [[package]] name = "zkevm-assembly" -version = "1.3.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=main#b995edca9b3e263f0d989f3c8c31eed1450fe3fc" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" dependencies = [ "env_logger 0.9.3", "hex", @@ -5897,6 +6097,7 @@ dependencies = [ "num-bigint 0.4.3", "num-traits", "regex", + "sha3 0.10.6", "smallvec", "structopt", "thiserror", @@ -5906,7 +6107,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=main#b9fd187b477358465b7e332f72e5bebfe64f02b8" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" dependencies = [ "bitflags", "ethereum-types", @@ -5917,7 +6118,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=main#e4e6aaf78b45280ec99057ba45a393776d8e45a2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" dependencies = [ "bincode", "blake2 0.10.6", @@ -5958,7 +6159,7 @@ version = "0.1.0" dependencies = [ "bincode", "ctrlc", - "futures 0.3.26", + "futures 0.3.27", "local-ip-address", "metrics", "prometheus_exporter", @@ -5998,6 +6199,7 @@ dependencies = [ "ethabi", "hex", "once_cell", + "serde", "serde_json", "zksync_utils", ] @@ -6059,7 +6261,10 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ - "cloud-storage", + 
"google-cloud-auth", + "google-cloud-default", + "google-cloud-storage", + "http", "metrics", "tokio", "vlog", @@ -6074,6 +6279,8 @@ dependencies = [ "metrics", "reqwest", "vlog", + "zksync_config", + "zksync_utils", ] [[package]] @@ -6151,9 +6358,10 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - "futures 0.3.26", + "futures 0.3.27", "hex", "num 0.3.1", + "reqwest", "serde", "thiserror", "tokio", diff --git a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs index 5122abb6ef4d..9604f65149b2 100644 --- a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs @@ -21,6 +21,7 @@ use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; use zksync_object_store::object_store::{create_object_store_from_env, PROVER_JOBS_BUCKET_PATH}; use zksync_prover_utils::numeric_index_to_circuit_name; +use zksync_prover_utils::region_fetcher::get_region; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::proofs::ProverJobMetadata; @@ -87,10 +88,30 @@ impl JobProcessor for CircuitSynthesizer { &self, connection_pool: ConnectionPool, ) -> Option<(Self::JobId, Self::Job)> { - let prover_job = connection_pool - .access_storage_blocking() - .prover_dal() - .get_next_prover_job(self.config.generation_timeout(), self.config.max_attempts)?; + let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); + let prover_group_config = ProverGroupConfig::from_env(); + + let circuit_ids = prover_group_config + .get_circuit_ids_for_group_id(config.prover_group_id) + .unwrap_or(vec![]); + if prover_group_config.is_specialized_group_id(config.prover_group_id) { + assert!(!circuit_ids.is_empty(), "No circuits found for specialized prover group id :{}", config.prover_group_id); + } + vlog::info!("Fetching prover jobs for group: {} and 
circuits: {:?}", config.prover_group_id, circuit_ids); + let circuit_types: Vec = circuit_ids.iter() + .map(|&id| numeric_index_to_circuit_name(id).unwrap_or_else(|| panic!("unknown id :{}", id)).to_string()) + .collect(); + let prover_job = if circuit_types.is_empty() { + connection_pool + .access_storage_blocking() + .prover_dal() + .get_next_prover_job(self.config.generation_timeout(), self.config.max_attempts)? + } else { + connection_pool + .access_storage_blocking() + .prover_dal() + .get_next_prover_job_by_circuit_types(self.config.generation_timeout(), self.config.max_attempts, circuit_types)? + }; let job_id = prover_job.id; Some((job_id, get_circuit(prover_job))) } @@ -121,19 +142,17 @@ impl JobProcessor for CircuitSynthesizer { _started_at: Instant, artifacts: Self::JobArtifacts, ) { - vlog::info!("Finished circuit synthesis for job: {}", job_id); + let region = get_region().await; + vlog::info!("Finished circuit synthesis for job: {} in region: {}", job_id, region); let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); let (assembly, circuit_id) = artifacts; let now = Instant::now(); - let specialized_prover_group_id = ProverGroupConfig::from_env() - .get_group_id_for_circuit_id(circuit_id) - .unwrap_or_else(|| panic!("No specialized prover running for circuit: {}", circuit_id)); while now.elapsed() < config.prover_instance_wait_timeout() { let optional_prover_instance = pool .clone() .access_storage_blocking() .gpu_prover_queue_dal() - .get_free_prover_instance(config.gpu_prover_queue_timeout(), specialized_prover_group_id); + .get_free_prover_instance(config.gpu_prover_queue_timeout(), config.prover_group_id, region.clone()); match optional_prover_instance { Some(address) => { vlog::info!( diff --git a/core/bin/circuit_synthesizer/src/main.rs b/core/bin/circuit_synthesizer/src/main.rs index 76be71623906..60f4f55827b9 100644 --- a/core/bin/circuit_synthesizer/src/main.rs +++ b/core/bin/circuit_synthesizer/src/main.rs @@ -44,9 
+44,16 @@ pub async fn wait_for_tasks(task_futures: Vec>) { #[tokio::main] async fn main() { let opt = Opt::from_args(); - vlog::init(); + let sentry_guard = vlog::init(); + match sentry_guard { + Some(_) => vlog::info!( + "Starting Sentry url: {}", + std::env::var("MISC_SENTRY_URL").unwrap(), + ), + None => vlog::info!("No sentry url configured"), + } let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); - let pool = ConnectionPool::new(Some(1), true); + let pool = ConnectionPool::new(None, true); let circuit_synthesizer = CircuitSynthesizer::new(config.clone()); let (stop_sender, stop_receiver) = watch::channel(false); diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index e160fe4b93f8..2c3c4efc3164 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -117,9 +117,12 @@ impl ContractVerifier { let zksolc = ZkSolc::new(zksolc_path, solc_path); - let output = time::timeout(config.compilation_timeout(), zksolc.async_compile(&input)) - .await - .map_err(|_| ContractVerifierError::CompilationTimeout)??; + let output = time::timeout( + config.compilation_timeout(), + zksolc.async_compile(&input, request.req.is_system), + ) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)??; if let Some(errors) = output.get("errors") { let errors = errors.as_array().unwrap().clone(); diff --git a/core/bin/contract-verifier/src/zksolc_utils.rs b/core/bin/contract-verifier/src/zksolc_utils.rs index badc709c396e..f8cd8c0e646b 100644 --- a/core/bin/contract-verifier/src/zksolc_utils.rs +++ b/core/bin/contract-verifier/src/zksolc_utils.rs @@ -67,10 +67,15 @@ impl ZkSolc { pub async fn async_compile( &self, input: &CompilerInput, + is_system_flag: bool, ) -> Result { use tokio::io::AsyncWriteExt; let content = serde_json::to_vec(input).unwrap(); - let mut child = tokio::process::Command::new(&self.zksolc_path) + let mut command = 
tokio::process::Command::new(&self.zksolc_path); + if is_system_flag { + command.arg("--system-mode"); + } + let mut child = command .arg("--standard-json") .arg("--solc") .arg(self.solc_path.to_str().unwrap()) diff --git a/core/bin/events_tx_initiator_address_migration/Cargo.toml b/core/bin/events_tx_initiator_address_migration/Cargo.toml deleted file mode 100644 index fffbe2f35b78..000000000000 --- a/core/bin/events_tx_initiator_address_migration/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "events_tx_initiator_address_migration" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tokio = { version = "1" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } \ No newline at end of file diff --git a/core/bin/events_tx_initiator_address_migration/src/main.rs b/core/bin/events_tx_initiator_address_migration/src/main.rs deleted file mode 100644 index 9eac8a81d7b6..000000000000 --- a/core/bin/events_tx_initiator_address_migration/src/main.rs +++ /dev/null @@ -1,24 +0,0 @@ -use zksync_dal::ConnectionPool; -use zksync_types::MiniblockNumber; - -#[tokio::main] -async fn main() { - let pool = ConnectionPool::new(Some(1), true); - let mut storage = pool.access_storage().await; - let last_sealed_miniblock = storage.blocks_dal().get_sealed_miniblock_number(); - - let mut current_miniblock_number = MiniblockNumber(0); - let block_range = 10000u32; - while current_miniblock_number <= last_sealed_miniblock { - let to_miniblock_number = current_miniblock_number + block_range - 1; - storage - .events_dal() - .set_tx_initiator_address(current_miniblock_number, to_miniblock_number); - println!( - "Processed miniblocks {}-{}", - current_miniblock_number, to_miniblock_number - ); - - current_miniblock_number += block_range; - } -} diff --git a/core/bin/prover/Cargo.lock b/core/bin/prover/Cargo.lock 
index 862ae45ff602..7854ce070c58 100644 --- a/core/bin/prover/Cargo.lock +++ b/core/bin/prover/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -144,8 +144,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -237,12 +237,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -286,7 +285,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "futures-channel", "futures-core", "futures-io", @@ -304,23 +303,24 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -331,13 +331,13 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -402,12 +402,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.1" @@ -422,9 +416,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beef" @@ -447,7 +441,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.26", + "futures 0.3.27", "hex", "lazy_static", "num_cpus", @@ -494,8 +488,8 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 
1.0.26", "regex", "rustc-hash", "shlex", @@ -593,9 +587,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -632,9 +626,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f0778972c64420fdedc63f09919c8a88bda7b25135357fd25a5d9f3257e832" +checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" dependencies = [ "memchr", "serde", @@ -713,9 +707,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -740,16 +734,16 @@ dependencies = [ [[package]] name = "circuit_testing" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#1769d65aa592645a097af20b1739c7cacb0715c1" +source = "git+https://github.com/matter-labs/circuit_testing.git?branch=main#028864449036071cfb4e9ebe7ee4c5be59893031" dependencies = [ "bellman_ce", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", @@ -771,29 +765,6 @@ dependencies = [ "vec_map", ] -[[package]] -name = "cloud-storage" -version = 
"0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7602ac4363f68ac757d6b87dd5d850549a14d37489902ae639c06ecec06ad275" -dependencies = [ - "async-trait", - "base64 0.13.1", - "bytes 1.4.0", - "chrono", - "dotenv", - "futures-util", - "hex", - "jsonwebtoken", - "lazy_static", - "openssl", - "percent-encoding", - "reqwest", - "serde", - "serde_json", - "tokio 1.25.0", -] - [[package]] name = "cloudabi" version = "0.0.3" @@ -806,7 +777,7 @@ dependencies = [ [[package]] name = "codegen" version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#4fb6397f778a580c9207ec23661228f5da7e66b4" +source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" dependencies = [ "ethereum-types", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -854,14 +825,20 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] name = "const-oid" -version = "0.9.1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + +[[package]] +name = "const-oid" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -945,11 +922,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-epoch 0.9.13", + 
"crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-epoch 0.9.14", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -964,12 +941,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -985,13 +962,13 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.13", - "crossbeam-utils 0.8.14", + "crossbeam-epoch 0.9.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -1011,14 +988,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", - "memoffset 0.7.1", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -1040,7 +1017,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -1056,9 +1033,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = 
"0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -1069,6 +1046,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1124,13 +1111,13 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1139,8 +1126,8 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1164,9 +1151,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1176,34 +1163,34 @@ dependencies = [ [[package]] name = "cxx-build" -version = 
"1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "scratch", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1224,10 +1211,10 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "strsim 0.10.0", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1237,8 +1224,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1251,13 +1238,24 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", + "crypto-bigint 0.3.2", + "pem-rfc7468", +] + [[package]] name = "der" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "const-oid", + "const-oid 0.9.2", "zeroize", ] @@ -1267,9 +1265,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1279,10 +1277,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustc_version", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1300,7 +1298,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1337,7 +1335,7 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", + "der 0.6.1", "elliptic-curve", "rfc6979", "signature", @@ -1369,13 +1367,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", - "crypto-bigint", - "der", + "crypto-bigint 0.4.9", + "der 0.6.1", "digest 0.10.6", "ff", "generic-array", "group", - "pkcs8", + "pkcs8 0.9.0", "rand_core 0.6.4", 
"sec1", "subtle", @@ -1497,9 +1495,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1536,10 +1534,22 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", +] + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", ] [[package]] @@ -1670,9 +1680,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -1685,9 +1695,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -1695,15 +1705,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -1724,9 +1734,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-lite" @@ -1749,32 +1759,32 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c4e684ddb2d8a4db5ca8a02b35156da129674ba4412b6f528698d58c594954" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "tokio 0.2.25", ] [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -1788,9 +1798,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1839,9 +1849,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" @@ -1907,15 +1917,98 @@ dependencies = [ "web-sys", ] +[[package]] +name = "google-cloud-auth" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44074eed3f9f0c05a522090f0cf1cfcdaef29965424d07908a6a372ffdee0985" +dependencies = [ + "async-trait", + "base64 0.21.0", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest", + "serde", + "serde_json", + "thiserror", + "time 0.3.20", + "tokio 1.26.0", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-default" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d47d0a154793b622b0aa39fda79d40694b6ef9aa8c932c0342f2088502aa3ea" +dependencies = [ + "async-trait", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-storage", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" 
+dependencies = [ + "reqwest", + "thiserror", + "tokio 1.26.0", +] + +[[package]] +name = "google-cloud-storage" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ed4e4f53bc4816db6f5669fb079338a8b6375a985fd6c9a1f3f8a864922541" +dependencies = [ + "async-stream", + "base64 0.21.0", + "bytes 1.4.0", + "futures-util", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "regex", + "reqwest", + "ring", + "rsa", + "serde", + "serde_json", + "sha2 0.10.6", + "thiserror", + "time 0.3.20", + "tokio 1.26.0", + "tokio-util 0.7.7", + "tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9fa5c241ab09d3531496127ef107a29cc2a8fde63676f7cbbe56a8a5e75883" +dependencies = [ + "async-trait", +] + [[package]] name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bindgen", "crossbeam 0.8.2", "derivative", - "futures 0.3.26", + "futures 0.3.27", "futures-locks", "num_cpus", ] @@ -1923,7 +2016,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -1949,9 +2042,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = 
"5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes 1.4.0", "fnv", @@ -1961,8 +2054,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.25.0", - "tokio-util 0.7.6", + "tokio 1.26.0", + "tokio-util 0.7.7", "tracing", ] @@ -2106,6 +2199,15 @@ dependencies = [ "digest 0.10.6", ] +[[package]] +name = "home" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +dependencies = [ + "winapi", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2119,13 +2221,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes 1.4.0", "fnv", - "itoa 1.0.5", + "itoa 1.0.6", ] [[package]] @@ -2159,9 +2261,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes 1.4.0", "futures-channel", @@ -2172,10 +2274,10 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.5", + "itoa 1.0.6", "pin-project-lite 0.2.9", "socket2", - "tokio 1.25.0", + "tokio 1.26.0", "tower-service", "tracing", "want", @@ -2192,7 +2294,7 @@ dependencies = [ "log", "rustls", "rustls-native-certs", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-rustls", "webpki-roots", ] @@ -2205,7 +2307,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.25.0", + 
"tokio 1.26.0", "tokio-io-timeout", ] @@ -2218,7 +2320,7 @@ dependencies = [ "bytes 1.4.0", "hyper", "native-tls", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-native-tls", ] @@ -2306,9 +2408,9 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2332,9 +2434,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" dependencies = [ "libc", "windows-sys 0.45.0", @@ -2354,9 +2456,9 @@ checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", @@ -2381,15 +2483,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = 
"936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -2409,7 +2511,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.26", + "futures 0.3.27", "futures-executor", "futures-util", "log", @@ -2453,9 +2555,9 @@ dependencies = [ "rustls-native-certs", "soketto", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-rustls", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tracing", "webpki-roots", ] @@ -2484,7 +2586,7 @@ dependencies = [ "serde_json", "soketto", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tracing", "wasm-bindgen-futures", ] @@ -2504,7 +2606,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tracing", ] @@ -2516,9 +2618,9 @@ checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.1", "proc-macro-crate", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2536,9 +2638,9 @@ dependencies = [ "serde", "serde_json", "soketto", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-stream", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tower", "tracing", ] @@ -2582,11 +2684,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ - "base64 0.12.3", + "base64 0.13.1", "pem", "ring", "serde", @@ -2629,6 +2731,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin", +] [[package]] 
name = "lazycell" @@ -2638,9 +2743,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" @@ -2652,6 +2757,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "libm" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" + [[package]] name = "librocksdb-sys" version = "0.6.1+6.28.2" @@ -2788,9 +2899,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -2821,7 +2932,7 @@ dependencies = [ "portable-atomic", "quanta", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tracing", ] @@ -2831,9 +2942,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2842,8 +2953,8 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ - "crossbeam-epoch 0.9.13", - "crossbeam-utils 0.8.14", + "crossbeam-epoch 0.9.14", + "crossbeam-utils 0.8.15", "hashbrown 0.12.3", "metrics", "num_cpus", @@ -2859,6 +2970,16 @@ version = "0.3.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2876,14 +2997,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2948,15 +3069,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nom8" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2997,20 +3109,21 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.2.6" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits", + "serde", ] [[package]] name = "num-bigint" -version = "0.3.3" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg 1.1.0", "num-integer", @@ -3019,15 
+3132,20 @@ dependencies = [ ] [[package]] -name = "num-bigint" -version = "0.4.3" +name = "num-bigint-dig" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" dependencies = [ - "autocfg 1.1.0", + "byteorder", + "lazy_static", + "libm", "num-integer", + "num-iter", "num-traits", - "serde", + "rand 0.8.5", + "smallvec", + "zeroize", ] [[package]] @@ -3066,9 +3184,9 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3124,6 +3242,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg 1.1.0", + "libm", ] [[package]] @@ -3147,9 +3266,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -3178,9 +3297,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3209,7 +3328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - 
"crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.7", "futures-channel", "futures-executor", "futures-util", @@ -3241,7 +3360,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.26", + "futures 0.3.27", "futures-util", "http", "opentelemetry", @@ -3250,7 +3369,7 @@ dependencies = [ "prost-build", "reqwest", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tonic", "tonic-build", ] @@ -3339,9 +3458,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3410,9 +3529,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -3444,13 +3563,20 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ "base64 0.13.1", - "once_cell", - "regex", +] + +[[package]] +name = "pem-rfc7468" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +dependencies = [ + "base64ct", ] [[package]] @@ -3461,9 +3587,9 @@ checksum = 
"478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -3471,9 +3597,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -3481,22 +3607,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "pest_meta" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", @@ -3528,9 +3654,9 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3551,14 +3677,36 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +dependencies = [ + "der 0.5.1", + "pkcs8 0.8.0", + "zeroize", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der 0.5.1", + "spki 0.5.4", + "zeroize", +] + [[package]] name = "pkcs8" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -3569,16 +3717,18 @@ checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite 0.2.9", + "windows-sys 0.45.0", ] [[package]] @@ -3608,12 +3758,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit 0.18.1", + "toml_edit 0.19.6", ] [[package]] @@ -3623,9 +3773,9 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "version_check", ] @@ -3635,8 +3785,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "version_check", ] @@ -3657,9 +3807,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -3670,7 +3820,7 @@ version = "1.0.0" dependencies = [ "metrics", "metrics-exporter-prometheus", - "tokio 1.25.0", + "tokio 1.26.0", "vlog", "zksync_config", ] @@ -3713,9 +3863,9 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3731,11 +3881,11 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "log", "num_cpus", "rand 0.4.6", @@ -3750,7 +3900,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "libc", "mach", "once_cell", @@ -3777,11 +3927,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", ] [[package]] @@ -3980,18 +4130,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.1" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307f7aacdbab3f0adee67d52739a1d71112cc068d6fab169ddeb18e48877fad" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags", ] [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -3999,13 +4149,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-utils 0.8.14", + "crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -4064,15 +4214,6 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.14" @@ -4094,6 +4235,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -4103,10 +4245,10 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.6", + "tokio-util 0.7.7", "tower-service", "url", "wasm-bindgen", @@ -4143,7 +4285,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] @@ -4194,6 +4336,26 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rsa" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" +dependencies = [ + "byteorder", + "digest 0.10.6", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8 0.8.0", + "rand_core 0.6.4", + "smallvec", + "subtle", + "zeroize", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -4229,9 +4391,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", @@ -4276,15 +4438,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "salsa20" @@ -4312,9 +4474,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -4349,9 +4511,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", - "der", + "der 0.6.1", "generic-array", - "pkcs8", + "pkcs8 0.9.0", "subtle", "zeroize", ] @@ -4409,9 +4571,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "send_wrapper" @@ -4421,9 +4583,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "sentry" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" +checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" 
dependencies = [ "httpdate", "native-tls", @@ -4431,16 +4593,17 @@ dependencies = [ "sentry-backtrace", "sentry-contexts", "sentry-core", + "sentry-debug-images", "sentry-panic", - "tokio 1.25.0", + "tokio 1.26.0", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" +checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" dependencies = [ "backtrace", "once_cell", @@ -4450,9 +4613,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" +checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" dependencies = [ "hostname", "libc", @@ -4464,9 +4627,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" +checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" dependencies = [ "once_cell", "rand 0.8.5", @@ -4475,11 +4638,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "sentry-debug-images" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +dependencies = [ + "findshlibs", + "once_cell", + "sentry-core", +] + [[package]] name = "sentry-panic" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" +checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" dependencies = [ "sentry-backtrace", 
"sentry-core", @@ -4487,9 +4661,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" +checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" dependencies = [ "debugid", "getrandom 0.2.8", @@ -4497,39 +4671,39 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.17", + "time 0.3.20", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "indexmap", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -4541,7 +4715,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.5", + "itoa 1.0.6", "ryu", "serde", ] @@ -4563,9 +4737,9 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4670,9 +4844,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -4680,9 +4854,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -4699,13 +4873,14 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", - "num-bigint 0.2.6", + "num-bigint 0.4.3", "num-traits", + "thiserror", + "time 0.3.20", ] [[package]] @@ -4716,9 +4891,9 @@ checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] 
@@ -4731,9 +4906,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -4747,7 +4922,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes 1.4.0", - "futures 0.3.26", + "futures 0.3.27", "http", "httparse", "log", @@ -4761,6 +4936,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der 0.5.1", +] + [[package]] name = "spki" version = "0.6.0" @@ -4768,7 +4953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", ] [[package]] @@ -4813,9 +4998,9 @@ dependencies = [ "bytes 1.4.0", "chrono", "crc", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.7", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "dirs", "either", "futures-channel", @@ -4861,14 +5046,14 @@ dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", "serde_json", "sha2 0.9.9", "sqlx-core", "sqlx-rt", - "syn 1.0.107", + "syn 1.0.109", "url", ] @@ -4930,9 +5115,9 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", 
"proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4951,10 +5136,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustversion", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -4976,19 +5161,19 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "unicode-ident", ] [[package]] name = "sync_vm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "arrayvec 0.7.2", "cs_derive", @@ -5020,16 +5205,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys 0.42.0", ] [[package]] @@ -5047,9 +5231,9 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5063,30 +5247,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -5102,11 +5287,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ - "itoa 1.0.5", + "itoa 1.0.6", "serde", "time-core", "time-macros", @@ -5120,9 +5305,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -5173,9 +5358,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", @@ -5188,7 +5373,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -5198,7 +5383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -5207,9 +5392,9 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5219,7 +5404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -5229,19 +5414,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", - "tokio 1.25.0", + "tokio 1.26.0", "webpki", ] [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -5255,29 +5440,29 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] name = "tokio-util" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6a3b08b64e6dfad376fa2432c7b1f01522e37a623c3050bc95db2d3ff21583" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes 1.4.0", "futures-core", "futures-io", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", "tracing", ] [[package]] name = "toml_datetime" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" [[package]] name = "toml_edit" @@ -5292,13 +5477,13 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.18.1" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" dependencies = [ "indexmap", - "nom8", "toml_datetime", + "winnow", ] [[package]] @@ -5322,7 +5507,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-stream", "tokio-util 0.6.10", "tower", @@ -5338,10 +5523,10 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", 
"prost-build", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5357,8 +5542,8 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.25.0", - "tokio-util 0.7.6", + "tokio 1.26.0", + "tokio-util 0.7.7", "tower-layer", "tower-service", "tracing", @@ -5395,9 +5580,9 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -5470,7 +5655,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.17", + "time 0.3.20", "tracing", "tracing-core", "tracing-log", @@ -5516,17 +5701,26 @@ dependencies = [ "libc", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -5592,6 +5786,12 @@ dependencies = [ "serde", ] +[[package]] +name = "urlencoding" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" + [[package]] name = 
"uuid" version = "1.3.0" @@ -5726,9 +5926,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -5750,7 +5950,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "wasm-bindgen-macro-support", ] @@ -5760,9 +5960,9 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5808,7 +6008,7 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures 0.3.26", + "futures 0.3.27", "futures-timer", "headers", "hex", @@ -5846,15 +6046,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -5868,9 +6059,9 @@ dependencies = [ [[package]] name = "whoami" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45dbc71f0cdca27dc261a9bd37ddec174e4a0af2b900b890f378460f745426e3" +checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68" dependencies = [ "wasm-bindgen", "web-sys", @@ -5933,9 +6124,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum 
= "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -5948,45 +6139,54 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = 
"26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -6012,7 +6212,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=main#ca203a25cbff50ad623630e393dd041aca58038d" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" dependencies = [ "blake2 0.10.6", "k256", @@ -6028,8 +6228,8 @@ dependencies = [ [[package]] name = "zkevm-assembly" -version = "1.3.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=main#b995edca9b3e263f0d989f3c8c31eed1450fe3fc" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" dependencies = [ "env_logger 0.9.3", "hex", @@ -6039,6 +6239,7 @@ dependencies = [ "num-bigint 0.4.3", "num-traits", "regex", + "sha3 0.10.6", "smallvec", "structopt", "thiserror", @@ -6048,7 +6249,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=main#b9fd187b477358465b7e332f72e5bebfe64f02b8" +source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" dependencies = [ "bitflags", "ethereum-types", @@ -6059,7 +6260,7 @@ 
dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=main#e4e6aaf78b45280ec99057ba45a393776d8e45a2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" dependencies = [ "bincode", "blake2 0.10.6", @@ -6100,12 +6301,12 @@ version = "1.0.0" dependencies = [ "async-trait", "convert_case 0.6.0", - "futures 0.3.26", + "futures 0.3.27", "hex", "serde", "serde_json", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "zksync_config", "zksync_contracts", "zksync_dal", @@ -6137,6 +6338,7 @@ dependencies = [ "ethabi", "hex", "once_cell", + "serde", "serde_json", "zksync_utils", ] @@ -6196,7 +6398,7 @@ dependencies = [ "parity-crypto", "serde", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "vlog", "zksync_config", "zksync_contracts", @@ -6236,9 +6438,12 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ - "cloud-storage", + "google-cloud-auth", + "google-cloud-default", + "google-cloud-storage", + "http", "metrics", - "tokio 1.25.0", + "tokio 1.26.0", "vlog", "zksync_config", "zksync_types", @@ -6253,7 +6458,7 @@ dependencies = [ "chrono", "ctrlc", "ethabi", - "futures 0.3.26", + "futures 0.3.27", "hex", "local-ip-address", "metrics", @@ -6265,7 +6470,7 @@ dependencies = [ "serde_json", "setup_key_generator_and_server", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "vlog", "zkevm_test_harness", "zksync_circuit_breaker", @@ -6285,6 +6490,8 @@ dependencies = [ "metrics", "reqwest", "vlog", + "zksync_config", + "zksync_utils", ] [[package]] @@ -6351,12 +6558,13 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - "futures 0.3.26", + "futures 0.3.27", "hex", "num 0.3.1", + "reqwest", "serde", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "zk_evm", "zksync_basic_types", ] diff --git a/core/bin/prover/src/main.rs b/core/bin/prover/src/main.rs index 
aede401a704d..ad3f75441149 100644 --- a/core/bin/prover/src/main.rs +++ b/core/bin/prover/src/main.rs @@ -15,12 +15,13 @@ use zksync_config::{ configs::api::Prometheus as PrometheusConfig, ApiConfig, ProverConfig, ProverConfigs, ZkSyncConfig, }; -use zksync_dal::gpu_prover_queue_dal::SocketAddress; +use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; use crate::artifact_provider::ProverArtifactProvider; use crate::prover::ProverReporter; use crate::prover_params::ProverParams; +use zksync_prover_utils::region_fetcher::get_region; use crate::socket_listener::incoming_socket_listener; use crate::synthesized_circuit_provider::SynthesizedCircuitProvider; @@ -33,9 +34,11 @@ mod synthesized_circuit_provider; pub async fn wait_for_tasks(task_futures: Vec>) { match future::select_all(task_futures).await.0 { Ok(_) => { + graceful_shutdown(); vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); } Err(error) => { + graceful_shutdown(); vlog::info!( "One of the tokio actors unexpectedly finished with error: {:?}", error @@ -44,6 +47,20 @@ pub async fn wait_for_tasks(task_futures: Vec>) { } } +fn graceful_shutdown() { + let pool = ConnectionPool::new(Some(1), true); + let host = local_ip().expect("Failed obtaining local IP address"); + let port = ProverConfigs::from_env().non_gpu.assembly_receiver_port; + let address = SocketAddress { + host, + port, + }; + pool.clone() + .access_storage_blocking() + .gpu_prover_queue_dal() + .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0); +} + fn get_ram_per_gpu() -> u64 { let device_info = gpu_prover::cuda_bindings::device_info(0).unwrap(); let ram_in_gb: u64 = device_info.total / (1024 * 1024 * 1024); @@ -96,6 +113,7 @@ async fn main() { ), None => vlog::info!("No sentry url configured"), } + let region = get_region().await; let (stop_signal_sender, mut stop_signal_receiver) = mpsc::channel(256); @@ -130,7 +148,7 @@ async 
fn main() { let circuit_ids = ProverGroupConfig::from_env() .get_circuit_ids_for_group_id(prover_config.specialized_prover_group_id); - vlog::info!("Starting proof generation for circuits: {:?}", circuit_ids); + vlog::info!("Starting proof generation for circuits: {:?} in region: {} with group-id: {}", circuit_ids, region, prover_config.specialized_prover_group_id); let mut tasks: Vec> = vec![]; tasks.push(prometheus_exporter::run_prometheus_exporter( @@ -161,13 +179,14 @@ async fn main() { producer, ConnectionPool::new(Some(1), true), prover_config.specialized_prover_group_id, + region ))); let artifact_provider = ProverArtifactProvider {}; let prover_job_reporter = ProverReporter { pool: ConnectionPool::new(Some(1), true), config: prover_config.clone(), - processed_by: std::env::var("POD_NAME").unwrap_or("Unknown".to_string()), + processed_by: env::var("POD_NAME").unwrap_or("Unknown".to_string()), }; let params: ProverParams = prover_config.clone().into(); diff --git a/core/bin/prover/src/prover.rs b/core/bin/prover/src/prover.rs index 1503ca881eb3..a38380a43f94 100644 --- a/core/bin/prover/src/prover.rs +++ b/core/bin/prover/src/prover.rs @@ -2,16 +2,13 @@ use std::time::Duration; use prover_service::JobResult::{Failure, ProofGenerated}; use prover_service::{JobReporter, JobResult}; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::{ZkSyncProof}; +use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; use zkevm_test_harness::pairing::bn256::Bn256; - use zksync_config::ProverConfig; use zksync_dal::ConnectionPool; - use zksync_object_store::object_store::{create_object_store_from_env, PROVER_JOBS_BUCKET_PATH}; - #[derive(Debug)] pub struct ProverReporter { pub(crate) pool: ConnectionPool, @@ -84,6 +81,11 @@ impl ProverReporter { ); if successful_proofs_count == required_proofs_count { + vlog::info!( + "Marking {:?} job for l1 batch number {:?} as queued", + next_round, + prover_job_metadata.block_number + ); 
transaction .witness_generator_dal() .mark_witness_job_as_queued(prover_job_metadata.block_number, next_round); @@ -100,11 +102,6 @@ impl ProverReporter { ); } transaction.commit_blocking(); - metrics::gauge!( - "server.block_number", - prover_job_metadata.block_number.0 as f64, - "stage" => format!("prove_{:?}",prover_job_metadata.aggregation_round), - ); } fn get_circuit_type(&self, job_id: usize) -> String { diff --git a/core/bin/prover/src/socket_listener.rs b/core/bin/prover/src/socket_listener.rs index 815b1faa4c46..3f251cf7dca9 100644 --- a/core/bin/prover/src/socket_listener.rs +++ b/core/bin/prover/src/socket_listener.rs @@ -14,6 +14,7 @@ pub async fn incoming_socket_listener( queue: SharedAssemblyQueue, pool: ConnectionPool, specialized_prover_group_id: u8, + region: String, ) { let listening_address = SocketAddr::new(host, port); vlog::info!( @@ -32,6 +33,7 @@ pub async fn incoming_socket_listener( address.clone(), queue.lock().unwrap().capacity(), specialized_prover_group_id, + region ); loop { diff --git a/core/bin/prover/src/synthesized_circuit_provider.rs b/core/bin/prover/src/synthesized_circuit_provider.rs index 57858f8887ec..811e78ee3190 100644 --- a/core/bin/prover/src/synthesized_circuit_provider.rs +++ b/core/bin/prover/src/synthesized_circuit_provider.rs @@ -31,6 +31,7 @@ impl RemoteSynthesizer for SynthesizedCircuitProvider { let is_full = assembly_queue.capacity() == assembly_queue.size(); return match assembly_queue.remove() { Ok(blob) => { + let queue_free_slots = assembly_queue.capacity() - assembly_queue.size(); if is_full { self.pool .clone() @@ -38,9 +39,19 @@ impl RemoteSynthesizer for SynthesizedCircuitProvider { .gpu_prover_queue_dal() .update_prover_instance_from_full_to_available( self.address.clone(), - assembly_queue.capacity() - assembly_queue.size(), + queue_free_slots, ); } + vlog::info!( + "Queue free slot {} for capacity {}", + queue_free_slots, + assembly_queue.capacity() + ); + metrics::histogram!( + 
"server.prover.queue_free_slots", + queue_free_slots as f64, + "queue_capacity" => assembly_queue.capacity().to_string() + ); Some(Box::new(Cursor::new(blob))) } Err(_) => None, diff --git a/core/bin/set_correct_tx_format_for_priority_ops/Cargo.toml b/core/bin/set_correct_tx_format_for_priority_ops/Cargo.toml deleted file mode 100644 index 8bc799cad687..000000000000 --- a/core/bin/set_correct_tx_format_for_priority_ops/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "set_correct_tx_format_for_priority_ops" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tokio = { version = "1" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } diff --git a/core/bin/set_correct_tx_format_for_priority_ops/src/main.rs b/core/bin/set_correct_tx_format_for_priority_ops/src/main.rs deleted file mode 100644 index 0e208c6b241b..000000000000 --- a/core/bin/set_correct_tx_format_for_priority_ops/src/main.rs +++ /dev/null @@ -1,18 +0,0 @@ -use std::thread::sleep; -use std::time::Duration; -use zksync_dal::ConnectionPool; - -#[tokio::main] -async fn main() { - let pool = ConnectionPool::new(Some(1), true); - let mut storage = pool.access_storage().await; - - while storage - .transactions_dal() - .set_correct_tx_type_for_priority_operations(40) - { - println!("Some txs were updated"); - sleep(Duration::from_secs(1)); - } - println!("finish"); -} diff --git a/core/bin/setup_key_generator_and_server/Cargo.lock b/core/bin/setup_key_generator_and_server/Cargo.lock index c00d3f3700dd..6e50836f3760 100644 --- a/core/bin/setup_key_generator_and_server/Cargo.lock +++ b/core/bin/setup_key_generator_and_server/Cargo.lock @@ -118,7 +118,7 @@ checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "api" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -144,8 +144,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -177,34 +177,35 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -268,9 +269,9 @@ checksum = 
"a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bellman_ce" @@ -331,8 +332,8 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "regex", "rustc-hash", "shlex", @@ -430,9 +431,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -512,9 +513,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -539,16 +540,16 @@ dependencies = [ [[package]] name = "circuit_testing" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#1769d65aa592645a097af20b1739c7cacb0715c1" +source = "git+https://github.com/matter-labs/circuit_testing.git?branch=main#028864449036071cfb4e9ebe7ee4c5be59893031" dependencies = [ "bellman_ce", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = 
"77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", @@ -582,7 +583,7 @@ dependencies = [ [[package]] name = "codegen" version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#4fb6397f778a580c9207ec23661228f5da7e66b4" +source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#cad8d38f631691a6b456eb4eb7b410fd129ca006" dependencies = [ "ethereum-types", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -616,9 +617,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" [[package]] name = "constant_time_eq" @@ -678,11 +679,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-epoch 0.9.13", + "crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-epoch 0.9.14", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -697,12 +698,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -718,13 +719,13 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.13", - "crossbeam-utils 0.8.14", + "crossbeam-epoch 0.9.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -744,14 +745,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", - "memoffset 0.7.1", + "crossbeam-utils 0.8.15", + "memoffset 0.8.0", "scopeguard", ] @@ -773,7 +774,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", ] [[package]] @@ -789,9 +790,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -847,13 +848,13 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", ] 
[[package]] @@ -867,9 +868,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90d59d9acd2a682b4e40605a242f6670eaa58c5957471cbf85e8aa6a0b97a5e8" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -879,34 +880,34 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfa40bda659dd5c864e65f4c9a2b0aff19bea56b017b9b77c73d3766a453a38" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "scratch", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] name = "cxxbridge-flags" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "457ce6757c5c70dc6ecdbda6925b958aae7f959bda7d8fb9bde889e34a09dc03" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebf883b7aacd7b2aeb2a7b338648ee19f57c140d4ee8e52c68979c6b2f7f2263" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -927,10 +928,10 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "strsim 0.10.0", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -940,8 +941,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -970,9 +971,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -982,10 +983,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustc_version", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -1003,7 +1004,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1165,9 +1166,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1204,10 +1205,22 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "serde", - "syn 1.0.107", + "syn 1.0.109", +] + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" 
+dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", ] [[package]] @@ -1332,9 +1345,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -1347,9 +1360,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -1357,15 +1370,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ "futures-core", "futures-task", @@ -1375,9 +1388,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-locks" @@ -1391,26 +1404,26 @@ 
dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -1420,9 +1433,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-channel", "futures-core", @@ -1470,9 +1483,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" @@ -1483,7 +1496,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] 
name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bindgen", "crossbeam 0.8.2", @@ -1496,7 +1509,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -1522,9 +1535,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes 1.4.0", "fnv", @@ -1534,8 +1547,8 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.25.0", - "tokio-util 0.7.6", + "tokio 1.26.0", + "tokio-util 0.7.7", "tracing", ] @@ -1661,9 +1674,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes 1.4.0", "fnv", @@ -1701,9 +1714,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" 
dependencies = [ "bytes 1.4.0", "futures-channel", @@ -1717,7 +1730,7 @@ dependencies = [ "itoa", "pin-project-lite 0.2.9", "socket2", - "tokio 1.25.0", + "tokio 1.26.0", "tower-service", "tracing", "want", @@ -1732,7 +1745,7 @@ dependencies = [ "http", "hyper", "rustls", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-rustls", ] @@ -1744,7 +1757,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-io-timeout", ] @@ -1757,7 +1770,7 @@ dependencies = [ "bytes 1.4.0", "hyper", "native-tls", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-native-tls", ] @@ -1845,9 +1858,9 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -1871,9 +1884,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" dependencies = [ "libc", "windows-sys 0.45.0", @@ -1887,9 +1900,9 @@ checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", @@ -1908,9 +1921,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" @@ -1971,9 +1984,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" @@ -2063,9 +2076,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg 1.1.0", ] @@ -2087,9 +2100,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2115,14 +2128,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2165,15 +2178,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nom8" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2272,9 +2276,9 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2353,9 +2357,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -2384,9 +2388,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2415,7 +2419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.6", + "crossbeam-channel 0.5.7", "futures-channel", "futures-executor", "futures-util", @@ -2456,7 +2460,7 @@ dependencies = [ "prost-build", "reqwest", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "tonic", "tonic-build", ] @@ -2545,9 +2549,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2585,9 +2589,9 @@ 
dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pbkdf2" @@ -2625,9 +2629,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -2635,9 +2639,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -2645,22 +2649,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "pest_meta" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", @@ -2692,9 +2696,9 @@ version = 
"1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2758,9 +2762,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", "toml_edit", @@ -2773,9 +2777,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "version_check", ] @@ -2785,8 +2789,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "version_check", ] @@ -2807,9 +2811,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -2852,9 +2856,9 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -2870,11 +2874,11 @@ 
dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-heavy-ops-service.git?branch=main#fda73622aac502ed5944bf135e31a6be9355886b" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=cleanup#c40d66b6d57f7f4d39259ffb993f5a1bba64bf6c" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.14", + "crossbeam-utils 0.8.15", "log", "num_cpus", "rand 0.4.6", @@ -2894,11 +2898,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", ] [[package]] @@ -3097,9 +3101,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -3107,13 +3111,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.6", - "crossbeam-deque 0.8.2", - "crossbeam-utils 0.8.14", + "crossbeam-channel 0.5.7", + "crossbeam-deque 0.8.3", + "crossbeam-utils 0.8.15", "num_cpus", ] @@ -3161,15 +3165,6 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" -[[package]] -name = "remove_dir_all" -version = 
"0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.14" @@ -3200,7 +3195,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-native-tls", "tokio-rustls", "tower-service", @@ -3314,9 +3309,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ "bitflags", "errno", @@ -3349,15 +3344,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "salsa20" @@ -3385,9 +3380,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "scrypt" @@ -3482,15 +3477,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "sentry" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e" +checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" dependencies = [ "httpdate", "native-tls", @@ -3498,16 +3493,17 @@ dependencies = [ "sentry-backtrace", "sentry-contexts", "sentry-core", + "sentry-debug-images", "sentry-panic", - "tokio 1.25.0", + "tokio 1.26.0", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a" +checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" dependencies = [ "backtrace", "once_cell", @@ -3517,9 +3513,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259" +checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" dependencies = [ "hostname", "libc", @@ -3531,9 +3527,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd" +checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" dependencies = [ "once_cell", "rand 0.8.5", @@ -3542,11 +3538,22 @@ dependencies = [ "serde_json", ] +[[package]] +name = "sentry-debug-images" +version = "0.30.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +dependencies = [ + "findshlibs", + "once_cell", + "sentry-core", +] + [[package]] name = "sentry-panic" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66" +checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3554,9 +3561,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.29.2" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7" +checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" dependencies = [ "debugid", "getrandom 0.2.8", @@ -3564,36 +3571,36 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.17", + "time 0.3.20", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -3629,9 +3636,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -3723,9 +3730,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -3742,9 +3749,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -3757,9 +3764,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -3824,9 +3831,9 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] 
[[package]] @@ -3845,10 +3852,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "rustversion", - "syn 1.0.107", + "syn 1.0.109", ] [[package]] @@ -3870,19 +3877,19 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", + "proc-macro2 1.0.52", + "quote 1.0.26", "unicode-ident", ] [[package]] name = "sync_vm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=main#370c4165dee04e15235af2692b4aafcca28243b9" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.1#cd58346ec0e9fc5a5715b8e6f0f38a9cf5f3653b" dependencies = [ "arrayvec 0.7.2", "cs_derive", @@ -3914,16 +3921,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys 0.42.0", ] [[package]] @@ -3941,9 +3947,9 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", 
] [[package]] @@ -3957,30 +3963,31 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -3997,9 +4004,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "serde", @@ -4015,9 +4022,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -4068,9 +4075,9 @@ dependencies = [ 
[[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", @@ -4083,7 +4090,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -4093,7 +4100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -4102,9 +4109,9 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4114,7 +4121,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -4124,19 +4131,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", - "tokio 1.25.0", + "tokio 1.26.0", "webpki", ] [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] @@ -4150,38 +4157,38 @@ 
dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", ] [[package]] name = "tokio-util" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6a3b08b64e6dfad376fa2432c7b1f01522e37a623c3050bc95db2d3ff21583" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes 1.4.0", "futures-core", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.25.0", + "tokio 1.26.0", "tracing", ] [[package]] name = "toml_datetime" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" [[package]] name = "toml_edit" -version = "0.18.1" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" dependencies = [ "indexmap", - "nom8", "toml_datetime", + "winnow", ] [[package]] @@ -4205,7 +4212,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "tokio 1.25.0", + "tokio 1.26.0", "tokio-stream", "tokio-util 0.6.10", "tower", @@ -4221,10 +4228,10 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.51", + "proc-macro2 1.0.52", "prost-build", - "quote 1.0.23", - "syn 1.0.107", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4240,8 +4247,8 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.25.0", - "tokio-util 0.7.6", + "tokio 1.26.0", + "tokio-util 0.7.7", "tower-layer", "tower-service", "tracing", @@ -4278,9 +4285,9 @@ version = "0.1.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", ] [[package]] @@ -4353,7 +4360,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.17", + "time 0.3.20", "tracing", "tracing-core", "tracing-log", @@ -4401,15 +4408,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -4565,9 +4572,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -4589,7 +4596,7 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ - "quote 1.0.23", + "quote 1.0.26", "wasm-bindgen-macro-support", ] @@ -4599,9 +4606,9 @@ version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ - "proc-macro2 1.0.51", - "quote 1.0.23", - "syn 1.0.107", + "proc-macro2 1.0.52", + "quote 1.0.26", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4740,9 +4747,9 @@ 
dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -4755,45 +4762,54 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = 
"0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "winnow" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -4819,7 +4835,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zk_evm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=main#ca203a25cbff50ad623630e393dd041aca58038d" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.1#c3d405d2a45526d87a803792745297860a19916b" dependencies = [ "blake2 0.10.6", "k256", @@ -4835,8 +4851,8 @@ dependencies = [ [[package]] name = "zkevm-assembly" -version = "1.3.0" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=main#b995edca9b3e263f0d989f3c8c31eed1450fe3fc" +version = "1.3.1" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.1#0ddd7e70d8d3d13725a937ec1553c8088fb61fda" dependencies = [ "env_logger 0.9.3", "hex", @@ -4846,6 +4862,7 @@ dependencies = [ "num-bigint 0.4.3", "num-traits", "regex", + "sha3 0.10.6", "smallvec", "structopt", "thiserror", @@ -4855,7 +4872,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=main#b9fd187b477358465b7e332f72e5bebfe64f02b8" +source = 
"git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.1#bb7888c83599bb9ee98041abea11f6524556d4e9" dependencies = [ "bitflags", "ethereum-types", @@ -4866,7 +4883,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=main#e4e6aaf78b45280ec99057ba45a393776d8e45a2" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.1#2a568f7bdbd61cdf389370be1162977d93ed9625" dependencies = [ "bincode", "blake2 0.10.6", @@ -4923,6 +4940,7 @@ dependencies = [ "ethabi", "hex", "once_cell", + "serde", "serde_json", "zksync_utils", ] @@ -4994,9 +5012,10 @@ dependencies = [ "futures", "hex", "num 0.3.1", + "reqwest", "serde", "thiserror", - "tokio 1.25.0", + "tokio 1.26.0", "zk_evm", "zksync_basic_types", ] diff --git a/core/bin/storage_logs_migration/Cargo.toml b/core/bin/storage_logs_migration/Cargo.toml deleted file mode 100644 index 8aaca82c5a8a..000000000000 --- a/core/bin/storage_logs_migration/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "storage_logs_migration" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tokio = { version = "1" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } diff --git a/core/bin/storage_logs_migration/src/main.rs b/core/bin/storage_logs_migration/src/main.rs deleted file mode 100644 index 44871cb16dd5..000000000000 --- a/core/bin/storage_logs_migration/src/main.rs +++ /dev/null @@ -1,27 +0,0 @@ -use zksync_dal::ConnectionPool; -use zksync_types::L1BatchNumber; - -#[tokio::main] -async fn main() { - let pool = ConnectionPool::new(Some(1), true); - let mut storage = pool.access_storage().await; - let last_sealed_l1_batch = storage.blocks_dal().get_sealed_block_number(); - - let mut current_l1_batch_number = 
L1BatchNumber(0); - let block_range = 100u32; - while current_l1_batch_number <= last_sealed_l1_batch { - let to_l1_batch_number = current_l1_batch_number + block_range - 1; - storage - .storage_logs_dedup_dal() - .migrate_protective_reads(current_l1_batch_number, to_l1_batch_number); - storage - .storage_logs_dedup_dal() - .migrate_initial_writes(current_l1_batch_number, to_l1_batch_number); - println!( - "Processed l1 batches {}-{}", - current_l1_batch_number, to_l1_batch_number - ); - - current_l1_batch_number += block_range; - } -} diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index 2f29f770e6d3..ea7d4ad8d95d 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -12,8 +12,8 @@ use vm::{ }, }; use zksync_types::{ - IntrinsicSystemGasConstants, FAIR_L2_GAS_PRICE, GUARANTEED_PUBDATA_IN_TX, - L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, + IntrinsicSystemGasConstants, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, + MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, }; mod intrinsic_costs; @@ -42,15 +42,20 @@ struct L1SystemConfig { l1_tx_delta_factory_deps_l2_gas: u32, l1_tx_delta_factory_deps_pubdata: u32, max_new_factory_deps: u32, - default_l2_gas_price_per_pubdata: u64, + required_l2_gas_price_per_pubdata: u64, } pub fn generate_l1_contracts_system_config(gas_constants: &IntrinsicSystemGasConstants) -> String { + // Currently this value is hardcoded here as a constant. + // L1->L2 txs are free for now and thus this value is unused on L1 contract, so it's ok. + // Though, maybe it's worth to use some other approach when users will pay for L1->L2 txs. 
+ const FAIR_L2_GAS_PRICE_ON_L1_CONTRACT: u64 = 250_000_000; + let l1_contracts_config = L1SystemConfig { l2_tx_max_gas_limit: MAX_TX_ERGS_LIMIT, max_pubdata_per_block: MAX_PUBDATA_PER_BLOCK, priority_tx_max_pubdata: (L1_TX_DECREASE * (MAX_PUBDATA_PER_BLOCK as f64)) as u32, - fair_l2_gas_price: FAIR_L2_GAS_PRICE, + fair_l2_gas_price: FAIR_L2_GAS_PRICE_ON_L1_CONTRACT, l1_gas_per_pubdata_byte: L1_GAS_PER_PUBDATA_BYTE, block_overhead_l2_gas: BLOCK_OVERHEAD_GAS, block_overhead_l1_gas: BLOCK_OVERHEAD_L1_GAS, @@ -64,7 +69,7 @@ pub fn generate_l1_contracts_system_config(gas_constants: &IntrinsicSystemGasCon l1_tx_delta_factory_deps_l2_gas: gas_constants.l1_tx_delta_factory_dep_gas, l1_tx_delta_factory_deps_pubdata: gas_constants.l1_tx_delta_factory_dep_pubdata, max_new_factory_deps: MAX_NEW_FACTORY_DEPS as u32, - default_l2_gas_price_per_pubdata: MAX_GAS_PER_PUBDATA_BYTE, + required_l2_gas_price_per_pubdata: MAX_GAS_PER_PUBDATA_BYTE, }; serde_json::to_string_pretty(&l1_contracts_config).unwrap() diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 3f2ea259ff56..592761a67116 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -13,7 +13,10 @@ use vm::{ zk_evm::{aux_structures::Timestamp, zkevm_opcode_defs::BOOTLOADER_HEAP_PAGE}, OracleTools, }; -use zksync_contracts::{load_sys_contract, read_bootloader_code, SystemContractCode}; +use zksync_contracts::{ + load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, BaseSystemContracts, + ContractLanguage, SystemContractCode, +}; use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; use zksync_storage::{db::Database, RocksDB}; use zksync_types::{ @@ -30,17 +33,27 @@ use zksync_types::{ BOOTLOADER_ADDRESS, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, U256, }; -use 
zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; use crate::intrinsic_costs::VmSpentResourcesResult; -pub static GAS_TEST_BOOTLOADER_CODE: Lazy = Lazy::new(|| { +pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_bootloader_code("gas_test"); let hash = hash_bytecode(&bytecode); - SystemContractCode { + let bootloader = SystemContractCode { code: bytes_to_be_words(bytecode), - hash: h256_to_u256(hash), + hash, + }; + + let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); + let hash = hash_bytecode(&bytecode); + BaseSystemContracts { + default_aa: SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }, + bootloader, } }); @@ -146,14 +159,33 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let mut oracle_tools = OracleTools::new(storage_ptr); - let transfer_test_bootloader = read_bootloader_test_code("transfer_test"); + let bytecode = read_bootloader_test_code("transfer_test"); + let hash = hash_bytecode(&bytecode); + + let bootloader = SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }; + + let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); + let hash = hash_bytecode(&bytecode); + + let default_aa = SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }; + + let base_system_contract = BaseSystemContracts { + bootloader, + default_aa, + }; let mut vm = init_vm_inner( &mut oracle_tools, BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - transfer_test_bootloader, + &base_system_contract, TxExecutionMode::VerifyExecute, ); @@ -247,7 +279,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - GAS_TEST_BOOTLOADER_CODE.code.clone(), + 
&GAS_TEST_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); @@ -258,9 +290,10 @@ pub(super) fn execute_user_txs_in_test_gas_vm( tx.clone().into(), TxExecutionMode::VerifyExecute, 0, + None, ); let tx_execution_result = vm - .execute_next_tx() + .execute_next_tx(u32::MAX) .expect("Bootloader failed while processing transaction"); total_gas_refunded += tx_execution_result.gas_refunded; diff --git a/core/bin/verification_key_generator_and_server/data/verification_11_key.json b/core/bin/verification_key_generator_and_server/data/verification_11_key.json index 3bae44408910..ec60b1b5c70c 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_11_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_11_key.json @@ -36,16 +36,16 @@ }, { "x": [ - 15617602246632337967, - 5734632097258026316, - 10326516376673449026, - 104556261410764610 + 4949545806128010634, + 7991544258837652527, + 13984289231122041826, + 435264553263929947 ], "y": [ - 11823761102806147813, - 10127667027117555433, - 513779115084852666, - 2443281277309199970 + 5315155210033461895, + 5269954775753247626, + 8365554241810378947, + 3038338810517586456 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/data/verification_3_key.json b/core/bin/verification_key_generator_and_server/data/verification_3_key.json index ec51689663ce..dcf6f058ba87 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_3_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_3_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 4552413926111948852, - 10957144662880794759, - 1806526858308545824, - 2720977094694375461 + 10550376187145162963, + 12863286379272137663, + 5328709993963115843, + 2172775919728224150 ], "y": [ - 9957232607201895378, - 9642598329050582748, - 7241368557931304566, - 2368139841965090910 + 16641463475462267374, + 9954127684318722109, + 17488757377000951923, + 
897495197662393128 ], "infinity": false }, { "x": [ - 17664367774067691121, - 5434452106745188417, - 13061281583481015732, - 1636638472636008156 + 6563322001445008336, + 797353186481493880, + 13315537264768069768, + 1248024693457931662 ], "y": [ - 4351985207465487643, - 11395591846019168789, - 16136180329293395748, - 3037028595557996988 + 17871396538659348629, + 11120452368184545101, + 4860719086907382388, + 545958236928374726 ], "infinity": false }, { "x": [ - 11088564426519797199, - 13911240355467381641, - 704193242607761309, - 2841093790816726473 + 292978148064211492, + 14411686124929797817, + 4896230459039474260, + 3325585646245466745 ], "y": [ - 8962630520073659328, - 173793000390552550, - 5675477828895844644, - 1653616683677519745 + 6546733926726165682, + 13661029324178790447, + 8144639210545978915, + 1916046104003446938 ], "infinity": false }, { "x": [ - 12882527139660212146, - 13453501656574828481, - 7645846961915393962, - 1425197621755678886 + 522387150147797243, + 8068708650984191567, + 9541497069690935578, + 1760680995799985067 ], "y": [ - 16430175978765315928, - 15495807420095904477, - 2277767085665142018, - 2828865080019988295 + 3038250252977779269, + 11127852903590962676, + 17196318053761803653, + 2056822949845331380 ], "infinity": false }, { "x": [ - 14984520689209033539, - 11643555254505584002, - 11263749877444050325, - 1778453501627364370 + 13013233153912437234, + 3481939096748005263, + 11703430308701963632, + 699965740798448577 ], "y": [ - 13093435792719005783, - 3521622018192356851, - 17363442251541284841, - 1729103955346249787 + 2376338211072159395, + 3649208867362862370, + 14621597111684436147, + 2544346533921699116 ], "infinity": false }, { "x": [ - 8472557649663210509, - 12959218494998230596, - 9420261090312891796, - 1557623015274275213 + 17820130262026588600, + 12964227826098503201, + 12061824382203771777, + 1319872583253268021 ], "y": [ - 16052238026542101971, - 10179034100393360237, - 9978634553947320008, - 578621147663557199 + 
5311405529601443696, + 4018470607034478338, + 12467574722491214286, + 1680065035418478766 ], "infinity": false }, { "x": [ - 7716060755495033877, - 2483781464143055151, - 1181897707039138261, - 111167263933376180 + 16492013190113062691, + 4078199263259563511, + 5012129511641877781, + 41349067130114428 ], "y": [ - 8737220167574918233, - 11705129040253191126, - 324694051487940786, - 1358715253160880918 + 1114102693178536110, + 2503559827484609194, + 17580206671275585606, + 1867813293022735930 ], "infinity": false }, { "x": [ - 16704606564921646360, - 10831094419697381185, - 8629189802092464726, - 1457480943878296289 + 16110849929343439596, + 6211162944442225023, + 4176530808959958823, + 776245536604962044 ], "y": [ - 13963821601066807541, - 18041123136689723150, - 10814456746840952660, - 2173492275071023829 + 16924432738061870794, + 12887029707706499486, + 4536220723940362359, + 3066276249014284578 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 13927786032597906772, - 16736825632878665355, - 1344069241704076041, - 3395012689630160919 + 1387561443159160261, + 477775943378385877, + 18289682494460818312, + 1225127103248480226 ], "y": [ - 2384561632721384994, - 14766877387148951981, - 2144452745561381419, - 457273820347677951 + 13861633555716877419, + 11021086605287795744, + 16936179868445624555, + 1675422904480642708 ], "infinity": false }, { "x": [ - 15439764061223871624, - 3299628057930947680, - 14198600802212718285, - 1397095985255125902 + 15468643478719493386, + 16296293513431599325, + 16896241103993257585, + 2527892380069637821 ], "y": [ - 15377822596502057312, - 6285736694449381031, - 11301642243242685820, - 1377917967042996956 + 6924618732580187946, + 4145636937666672779, + 7746613394270955608, + 3066347309052150921 ], "infinity": false } @@ -160,78 +160,78 @@ "permutation_commitments": [ { "x": [ - 15570364878064191471, - 6282941464448732634, - 9471325995619045861, - 1232197041581264155 + 14066357332293623201, + 
5590408864608988441, + 13212552532626677878, + 1266149383570298600 ], "y": [ - 12280116801318148056, - 3076430362699060719, - 15641862360252683642, - 1036589014921989740 + 8869085471956584718, + 5205295041620019258, + 3809406423704825921, + 1179747793942763876 ], "infinity": false }, { "x": [ - 5619821529319779769, - 369651205919014093, - 3573760605900424455, - 1578181493222357476 + 14164174487380098248, + 399714379143206482, + 17932555173948549907, + 1510320565295811683 ], "y": [ - 13972658895413012837, - 3324728560278728728, - 1514863722019353225, - 283539618506176946 + 16262108781366800885, + 11903929495245543058, + 2030047105830251389, + 3175594667863213623 ], "infinity": false }, { "x": [ - 7313654849318558545, - 3856980231091609256, - 13652594590991171434, - 1350431962017866810 + 12424381171397409053, + 9816009036263118486, + 123424474575366759, + 2076055869458958907 ], "y": [ - 15802200192820379645, - 7943671073337715397, - 9635282915426247707, - 2405156099602918935 + 9044471714502471176, + 3761425429363202673, + 2034795243747202222, + 2173356352931419454 ], "infinity": false }, { "x": [ - 16927593081999500567, - 12015826224538208031, - 13033887047158923253, - 508845269866381969 + 15867531630171378979, + 18364941470214608297, + 14848155053348599697, + 1692409159029904984 ], "y": [ - 3816934949381974009, - 15225306878268851022, - 13695992495268457957, - 2039957178158588775 + 15366054888699024816, + 17348713584488930215, + 15195830143979672152, + 572727508810720113 ], "infinity": false } ], - "total_lookup_entries_length": 15103347, + "total_lookup_entries_length": 15088282, "lookup_selector_commitment": { "x": [ - 132701641292747981, - 9385191737942122578, - 9224572231433703600, - 422768887908740278 + 862345184327547040, + 7262652051250145030, + 10964591811473823871, + 1435380748710871870 ], "y": [ - 7973095609514393935, - 9109168329146163159, - 1665658611543684747, - 2682386040886163584 + 10151301439783108881, + 1188186211158585113, + 8160902585771466656, + 
833565525567445435 ], "infinity": false }, @@ -299,16 +299,16 @@ ], "lookup_table_type_commitment": { "x": [ - 3337588084106662427, - 632442254875580351, - 13994678952882390428, - 2231491013059247615 + 12316913295088139285, + 11480691069159889893, + 7996535489614067794, + 809390717012814237 ], "y": [ - 1885998007338074473, - 15463564406739479850, - 16180378634933639894, - 2911611175129454243 + 6230277635134941386, + 3986531911879547545, + 3909527337181494200, + 3269190001359038160 ], "infinity": false }, diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index ee18d8d64fb4..3106fd55cc01 100644 --- a/core/bin/zksync_core/Cargo.toml +++ b/core/bin/zksync_core/Cargo.toml @@ -30,7 +30,7 @@ zksync_mini_merkle_tree = { path = "../../lib/mini_merkle_tree", version = "1.0" zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-features = false, features = [ - "server", + "server", "client" ] } zksync_object_store = { path = "../../lib/object_store", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox.rs b/core/bin/zksync_core/src/api_server/execution_sandbox.rs index 86e266abc6dd..6e60b6c0b003 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox.rs @@ -6,23 +6,22 @@ use crate::api_server::web3::backend_jsonrpc::error::internal_error; use thiserror::Error; use tracing::{span, Level}; use vm::oracles::tracer::{ValidationError, ValidationTracerParams}; -use vm::utils::default_block_properties; use zksync_types::api::BlockId; -use zksync_types::tx::tx_execution_info::get_initial_and_repeated_storage_writes; use zksync_types::utils::storage_key_for_eth_balance; -use 
zksync_types::{ - get_known_code_key, H256, PUBLISH_BYTECODE_OVERHEAD, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, -}; +use zksync_types::{PUBLISH_BYTECODE_OVERHEAD, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use crate::db_storage_provider::DbStorageProvider; use vm::vm_with_bootloader::{ derive_base_fee_and_gas_per_pubdata, init_vm, push_transaction_to_bootloader_memory, BlockContext, BlockContextMode, BootloaderJobType, DerivedBlockContext, TxExecutionMode, }; +use vm::zk_evm::block_properties::BlockProperties; use vm::{ storage::Storage, utils::ETH_CALL_GAS_LIMIT, TxRevertReason, VmBlockResult, VmExecutionResult, VmInstance, }; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_state::storage_view::StorageView; use zksync_types::{ @@ -31,10 +30,11 @@ use zksync_types::{ fee::TransactionExecutionMetrics, get_nonce_key, l2::L2Tx, + storage_writes_deduplicator::StorageWritesDeduplicator, utils::{decompose_full_nonce, nonces_to_full_nonce}, AccountTreeId, MiniblockNumber, Nonce, Transaction, U256, }; -use zksync_utils::bytecode::{bytecode_len_in_bytes, hash_bytecode}; +use zksync_utils::bytecode::{bytecode_len_in_bytes, hash_bytecode, CompressedBytecodeInfo}; use zksync_utils::time::millis_since_epoch; use zksync_utils::{h256_to_u256, u256_to_h256}; use zksync_web3_decl::error::Web3Error; @@ -75,6 +75,7 @@ pub fn execute_tx_eth_call( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, + base_system_contract: &BaseSystemContracts, ) -> Result { let mut storage = connection_pool.access_storage_blocking(); let resolved_block_number = storage @@ -92,7 +93,7 @@ pub fn execute_tx_eth_call( tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); let vm_result = execute_tx_in_sandbox( storage, - tx, + tx.into(), TxExecutionMode::EthCall, AccountTreeId::default(), block_id, @@ -104,6 +105,7 @@ pub fn execute_tx_eth_call( l1_gas_price, 
fair_l2_gas_price, enforced_base_fee, + base_system_contract, ) .1 .map_err(|err| { @@ -127,11 +129,17 @@ fn get_pending_state( (block_id, connection, resolved_block_number) } -#[tracing::instrument(skip(connection_pool, tx, operator_account, enforced_nonce))] +#[tracing::instrument(skip( + connection_pool, + tx, + operator_account, + enforced_nonce, + base_system_contracts +))] #[allow(clippy::too_many_arguments)] pub fn execute_tx_with_pending_state( connection_pool: &ConnectionPool, - tx: L2Tx, + tx: Transaction, operator_account: AccountTreeId, execution_mode: TxExecutionMode, enforced_nonce: Option, @@ -139,6 +147,7 @@ pub fn execute_tx_with_pending_state( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, + base_system_contracts: &BaseSystemContracts, ) -> ( TransactionExecutionMetrics, Result, @@ -150,7 +159,7 @@ pub fn execute_tx_with_pending_state( let l1_gas_price = adjust_l1_gas_price_for_tx( l1_gas_price, fair_l2_gas_price, - tx.common_data.fee.gas_per_pubdata_limit, + tx.gas_per_pubdata_byte_limit(), ); execute_tx_in_sandbox( @@ -167,6 +176,7 @@ pub fn execute_tx_with_pending_state( l1_gas_price, fair_l2_gas_price, enforced_base_fee, + base_system_contracts, ) } @@ -182,20 +192,23 @@ pub fn get_pubdata_for_factory_deps( factory_deps .as_ref() .map(|deps| { - let mut total_published_length = 0; - - for dep in deps.iter() { - let bytecode_hash = hash_bytecode(dep); - let key = get_known_code_key(&bytecode_hash); - - // The bytecode needs to be published only if it is not known - let is_known = storage_view.get_value(&key); - if is_known == H256::zero() { - total_published_length += dep.len() as u32 + PUBLISH_BYTECODE_OVERHEAD; - } - } - - total_published_length + deps.iter() + .filter_map(|bytecode| { + if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) { + return None; + } + + let length = if let Ok(compressed) = + CompressedBytecodeInfo::from_original(bytecode.clone()) + { + compressed.compressed.len() + } else { + 
bytecode.len() + }; + + Some(length as u32 + PUBLISH_BYTECODE_OVERHEAD) + }) + .sum() }) .unwrap_or_default() } @@ -211,6 +224,8 @@ pub fn validate_tx_with_pending_state( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, + base_system_contracts: &BaseSystemContracts, + computational_gas_limit: u32, ) -> Result<(), ValidationError> { let (block_id, connection, resolved_block_number) = get_pending_state(connection_pool); @@ -227,6 +242,7 @@ pub fn validate_tx_with_pending_state( tx, execution_mode, operator_account, + base_system_contracts, block_id, resolved_block_number, None, @@ -235,10 +251,11 @@ pub fn validate_tx_with_pending_state( l1_gas_price, fair_l2_gas_price, enforced_base_fee, + computational_gas_limit, ) } -fn adjust_l1_gas_price_for_tx( +pub(crate) fn adjust_l1_gas_price_for_tx( l1_gas_price: u64, fair_l2_gas_price: u64, tx_gas_per_pubdata_limit: U256, @@ -262,11 +279,17 @@ fn adjust_l1_gas_price_for_tx( /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) -#[tracing::instrument(skip(connection, tx, operator_account, block_timestamp_s))] #[allow(clippy::too_many_arguments)] +#[tracing::instrument(skip( + connection, + tx, + operator_account, + block_timestamp_s, + base_system_contract +))] fn execute_tx_in_sandbox( connection: StorageProcessor<'_>, - tx: L2Tx, + tx: Transaction, execution_mode: TxExecutionMode, operator_account: AccountTreeId, block_id: api::BlockId, @@ -278,6 +301,7 @@ fn execute_tx_in_sandbox( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, + base_system_contract: &BaseSystemContracts, ) -> ( TransactionExecutionMetrics, Result, @@ -295,6 +319,7 @@ fn execute_tx_in_sandbox( connection, tx, execution_mode, + base_system_contract, operator_account, block_id, resolved_block_number, @@ -305,8 +330,7 @@ fn execute_tx_in_sandbox( fair_l2_gas_price, 
enforced_base_fee, |vm, tx| { - let tx: Transaction = tx.into(); - push_transaction_to_bootloader_memory(vm, &tx, execution_mode); + push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); let VmBlockResult { full_result: result, .. @@ -333,8 +357,9 @@ fn execute_tx_in_sandbox( #[allow(clippy::too_many_arguments)] fn apply_vm_in_sandbox( mut connection: StorageProcessor<'_>, - tx: L2Tx, + tx: Transaction, execution_mode: TxExecutionMode, + base_system_contracts: &BaseSystemContracts, operator_account: AccountTreeId, block_id: api::BlockId, resolved_block_number: zksync_types::MiniblockNumber, @@ -344,7 +369,7 @@ fn apply_vm_in_sandbox( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, - apply: impl FnOnce(&mut Box>, L2Tx) -> T, + apply: impl FnOnce(&mut Box>, Transaction) -> T, ) -> T { let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "initialization").entered(); @@ -416,7 +441,10 @@ fn apply_vm_in_sandbox( } let mut oracle_tools = vm::OracleTools::new(&mut storage_view as &mut dyn Storage); - let block_properties = default_block_properties(); + let block_properties = BlockProperties { + default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }; let block_context = DerivedBlockContext { context: BlockContext { @@ -440,6 +468,7 @@ fn apply_vm_in_sandbox( block_context_properties, &block_properties, execution_mode, + base_system_contracts, ); metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "initialization"); @@ -447,22 +476,31 @@ fn apply_vm_in_sandbox( let result = apply(&mut vm, tx); + let oracles_sizes = record_vm_memory_metrics(vm); + let storage_view_cache = storage_view.get_cache_size(); + metrics::histogram!( + "runtime_context.memory.storage_view_cache_size", + storage_view_cache as f64 + ); + metrics::histogram!( + "runtime_context.memory", + (oracles_sizes + storage_view_cache) as f64 + ); + 
metrics::histogram!("runtime_context.storage_interaction", storage_view.storage_invocations as f64, "interaction" => "set_value_storage_invocations"); metrics::histogram!("runtime_context.storage_interaction", storage_view.new_storage_invocations as f64, "interaction" => "set_value_new_storage_invocations"); metrics::histogram!("runtime_context.storage_interaction", storage_view.get_value_storage_invocations as f64, "interaction" => "set_value_get_value_storage_invocations"); metrics::histogram!("runtime_context.storage_interaction", storage_view.set_value_storage_invocations as f64, "interaction" => "set_value_set_value_storage_invocations"); - metrics::histogram!("runtime_context.storage_interaction", storage_view.contract_load_invocations as f64, "interaction" => "set_value_contract_load_invocations"); const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1000; if storage_view.storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { vlog::info!( - "Tx resulted in {} storage_invocations, {} new_storage_invocations, {} get_value_storage_invocations, {} set_value_storage_invocations, {} contract_load_invocations", + "Tx resulted in {} storage_invocations, {} new_storage_invocations, {} get_value_storage_invocations, {} set_value_storage_invocations", storage_view.storage_invocations, storage_view.new_storage_invocations, storage_view.get_value_storage_invocations, storage_view.set_value_storage_invocations, - storage_view.contract_load_invocations ); } @@ -475,6 +513,7 @@ fn apply_vm_in_sandbox( fn get_validation_params( connection: &mut StorageProcessor<'_>, tx: &L2Tx, + computational_gas_limit: u32, ) -> ValidationTracerParams { let user_address = tx.common_data.initiator_address; let paymaster_address = tx.common_data.paymaster_params.paymaster; @@ -520,6 +559,7 @@ fn get_validation_params( trusted_slots, trusted_addresses, trusted_address_slots, + computational_gas_limit, } } @@ -529,6 +569,7 @@ fn validate_tx_in_sandbox( tx: L2Tx, execution_mode: 
TxExecutionMode, operator_account: AccountTreeId, + base_system_contracts: &BaseSystemContracts, block_id: api::BlockId, resolved_block_number: zksync_types::MiniblockNumber, block_timestamp_s: Option, @@ -537,15 +578,19 @@ fn validate_tx_in_sandbox( l1_gas_price: u64, fair_l2_gas_price: u64, enforced_base_fee: Option, + computational_gas_limit: u32, ) -> Result<(), ValidationError> { let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "validate_in_sandbox").entered(); - let validation_params = get_validation_params(&mut connection, &tx); + let validation_params = get_validation_params(&mut connection, &tx, computational_gas_limit); + + let tx: Transaction = tx.into(); let validation_result = apply_vm_in_sandbox( connection, tx, execution_mode, + base_system_contracts, operator_account, block_id, resolved_block_number, @@ -559,8 +604,7 @@ fn validate_tx_in_sandbox( let stage_started_at = Instant::now(); let span = span!(Level::DEBUG, "validation").entered(); - let tx: Transaction = tx.into(); - push_transaction_to_bootloader_memory(vm, &tx, execution_mode); + push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); let result = vm.execute_validation(validation_params); metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "validation"); @@ -596,12 +640,12 @@ fn collect_tx_execution_metrics( .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash)) .sum(); - let (initial_storage_writes, repeated_storage_writes) = - get_initial_and_repeated_storage_writes(result.storage_log_queries.as_slice()); + let writes_metrics = + StorageWritesDeduplicator::apply_on_empty_state(&result.storage_log_queries); TransactionExecutionMetrics { - initial_storage_writes: initial_storage_writes as usize, - repeated_storage_writes: repeated_storage_writes as usize, + initial_storage_writes: writes_metrics.initial_storage_writes, + repeated_storage_writes: writes_metrics.repeated_storage_writes, gas_used: result.gas_used as usize, 
event_topics, l2_l1_long_messages, @@ -620,9 +664,7 @@ impl From for SandboxExecutionError { fn from(reason: TxRevertReason) -> Self { match reason { TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert(reason.to_string()), - TxRevertReason::TxOutOfGas => { - SandboxExecutionError::Revert(TxRevertReason::TxOutOfGas.to_string()) - } + TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert(reason.to_string()), TxRevertReason::FailedToChargeFee(reason) => { SandboxExecutionError::FailedToChargeFee(reason.to_string()) } @@ -661,3 +703,37 @@ impl From for SandboxExecutionError { } } } + +/// Returns the sum of all oracles' sizes. +fn record_vm_memory_metrics(vm: Box) -> usize { + let event_sink_inner = vm.state.event_sink.get_size(); + let event_sink_history = vm.state.event_sink.get_history_size(); + let memory_inner = vm.state.memory.get_size(); + let memory_history = vm.state.memory.get_history_size(); + let decommittment_processor_inner = vm.state.decommittment_processor.get_size(); + let decommittment_processor_history = vm.state.decommittment_processor.get_history_size(); + let storage_inner = vm.state.storage.get_size(); + let storage_history = vm.state.storage.get_history_size(); + + metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.memory_size", memory_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.memory_size", memory_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.storage_size", storage_inner as f64, "type" => 
"inner"); + metrics::histogram!("runtime_context.memory.storage_size", storage_history as f64, "type" => "history"); + + [ + event_sink_inner, + event_sink_history, + memory_inner, + memory_history, + decommittment_processor_inner, + decommittment_processor_history, + storage_inner, + storage_history, + ] + .iter() + .sum::() +} diff --git a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs index 9e0324d5f3db..23195569f1b3 100644 --- a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs +++ b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs @@ -36,6 +36,8 @@ impl RestApi { .route("/network_stats", web::get().to(Self::network_stats)) .route("/blocks", web::get().to(Self::block_pagination)) .route("/block/{number}", web::get().to(Self::block_details)) + .route("/l1_batches", web::get().to(Self::l1_batch_pagination)) + .route("/l1_batch/{number}", web::get().to(Self::l1_batch_details)) .route("/transactions", web::get().to(Self::transaction_pagination)) .route( "/transaction/{hash}", diff --git a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs index 82cfcb529d53..55ca4cd09861 100644 --- a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs +++ b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs @@ -9,10 +9,10 @@ use serde::Serialize; use zksync_types::{ explorer_api::{ AccountDetails, AccountType, AddressDetails, BlocksQuery, ContractDetails, EventsQuery, - PaginationQuery, TransactionsQuery, VerificationIncomingRequest, + L1BatchesQuery, PaginationQuery, TransactionsQuery, VerificationIncomingRequest, }, storage::L2_ETH_TOKEN_ADDRESS, - Address, MiniblockNumber, H256, + Address, L1BatchNumber, MiniblockNumber, H256, }; use super::api_decl::RestApi; @@ -250,6 +250,7 @@ impl RestApi { .get_transactions_page( query.tx_position(), query.block_number, + query.l1_batch_number, query.contract_address, 
query.pagination, self_.config.api.explorer.offset_limit(), @@ -258,7 +259,9 @@ impl RestApi { .unwrap() }; - let query_type = if query.block_number.is_some() { + let query_type = if query.l1_batch_number.is_some() { + "l1_batch_txs" + } else if query.block_number.is_some() { "block_txs" } else if query.account_address.is_some() { "account_txs" @@ -342,6 +345,57 @@ impl RestApi { } } + #[tracing::instrument(skip(self_))] + pub async fn l1_batch_pagination( + self_: web::Data, + web::Query(query): web::Query, + ) -> ActixResult { + let start = Instant::now(); + if let Err(res) = self_.validate_pagination_query(query.pagination) { + return Ok(res); + } + let last_verified_miniblock = self_.network_stats.read().await.last_verified; + let mut storage = self_.replica_connection_pool.access_storage().await; + + let last_verified_l1_batch = storage + .blocks_web3_dal() + .get_l1_batch_number_of_miniblock(last_verified_miniblock) + .unwrap() + .expect("Verified miniblock must be included in l1 batch"); + + let l1_batches = storage + .explorer() + .blocks_dal() + .get_l1_batches_page(query, last_verified_l1_batch) + .unwrap(); + + metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_pagination"); + ok_json(l1_batches) + } + + #[tracing::instrument(skip(self_))] + pub async fn l1_batch_details( + self_: web::Data, + number: web::Path, + ) -> ActixResult { + let start = Instant::now(); + + let l1_batch_details = self_ + .replica_connection_pool + .access_storage() + .await + .explorer() + .blocks_dal() + .get_l1_batch_details(L1BatchNumber(*number)) + .unwrap(); + + metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_details"); + match l1_batch_details { + Some(l1_batch_details) => ok_json(l1_batch_details), + None => Ok(HttpResponse::NotFound().finish()), + } + } + #[tracing::instrument(skip(self_))] pub async fn token_details( self_: web::Data, diff --git a/core/bin/zksync_core/src/api_server/tx_sender/error.rs 
b/core/bin/zksync_core/src/api_server/tx_sender/error.rs index c6c37ec9e87a..fbfa3e26c416 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/error.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/error.rs @@ -53,6 +53,17 @@ pub enum SubmitTxError { FeePerGasTooHigh, #[error("max fee per pubdata byte higher than 2^32")] FeePerPubdataByteTooHigh, + /// InsufficientFundsForTransfer is returned if the transaction sender doesn't + /// have enough funds for transfer. + #[error("insufficient balance for transfer")] + InsufficientFundsForTransfer, + /// IntrinsicGas is returned if the transaction is specified to use less gas + /// than required to start the invocation. + #[error("intrinsic gas too low")] + IntrinsicGas, + /// Error returned from main node + #[error("{0}")] + ProxyError(#[from] zksync_web3_decl::jsonrpsee::core::Error), } impl SubmitTxError { pub fn grafana_error_code(&self) -> &'static str { @@ -80,6 +91,9 @@ impl SubmitTxError { SubmitTxError::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", SubmitTxError::FeePerGasTooHigh => "gas-price-limit-too-high", SubmitTxError::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", + SubmitTxError::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", + SubmitTxError::IntrinsicGas => "intrinsic-gas", + SubmitTxError::ProxyError(_) => "proxy-error", } } } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs index 93a40eee3f13..20a078b559d4 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs @@ -1,9 +1,6 @@ //! Helper module to submit transactions into the zkSync Network. 
- // Built-in uses -use std::num::NonZeroU32; -use std::sync::Arc; -use std::time::Instant; +use std::{cmp::min, num::NonZeroU32, sync::Arc, time::Instant}; // External uses use bigdecimal::BigDecimal; @@ -18,32 +15,48 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_dal::transactions_dal::L2TxSubmissionResult; use zksync_eth_client::clients::http_client::EthereumClient; +use vm::transaction_data::TransactionData; use zksync_types::fee::TransactionExecutionMetrics; -use zksync_types::utils::storage_key_for_eth_balance; + use zksync_types::{ - FAIR_L2_GAS_PRICE, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, + ExecuteTransactionCommon, Transaction, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, + MAX_NEW_FACTORY_DEPS, }; -// Workspace uses + +use zksync_config::ZkSyncConfig; +use zksync_dal::ConnectionPool; + use zksync_types::{ - api, fee::Fee, l2::error::TxCheckError::TxDuplication, AccountTreeId, Address, L2ChainId, U256, + api, + fee::Fee, + get_code_key, get_intrinsic_constants, + l2::error::TxCheckError::TxDuplication, + l2::L2Tx, + tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, L2ChainId, Nonce, H160, H256, U256, }; + +use zksync_contracts::BaseSystemContracts; use zksync_utils::h256_to_u256; -pub mod error; + // Local uses use crate::api_server::execution_sandbox::{ - execute_tx_with_pending_state, get_pubdata_for_factory_deps, validate_tx_with_pending_state, - SandboxExecutionError, + adjust_l1_gas_price_for_tx, execute_tx_with_pending_state, get_pubdata_for_factory_deps, + validate_tx_with_pending_state, SandboxExecutionError, }; use crate::fee_ticker::{error::TickerError, FeeTicker, TokenPriceRequestType}; use crate::gas_adjuster::GasAdjuster; -use crate::gas_tracker::gas_count_from_tx_and_metrics; +use crate::gas_tracker::{gas_count_from_tx_and_metrics, gas_count_from_writes}; use 
crate::state_keeper::seal_criteria::{SealManager, SealResolution}; +pub mod error; pub use error::SubmitTxError; -use zksync_config::ZkSyncConfig; -use zksync_dal::ConnectionPool; -use zksync_types::{l2::L2Tx, tx::ExecutionMetrics, Nonce}; +use vm::transaction_data::{derive_overhead, OverheadCoeficients}; + +pub mod proxy; +pub use proxy::TxProxy; pub struct TxSenderInner { pub master_connection_pool: ConnectionPool, @@ -58,6 +71,9 @@ pub struct TxSenderInner { // Used to keep track of gas prices for the fee ticker. pub gas_adjuster: Arc>, pub state_keeper_config: StateKeeperConfig, + pub playground_base_system_contracts: BaseSystemContracts, + pub estimate_fee_base_system_contracts: BaseSystemContracts, + pub proxy: Option, } #[derive(Clone)] @@ -75,6 +91,8 @@ impl TxSender { master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, gas_adjuster: Arc>, + playground_base_system_contracts: BaseSystemContracts, + estimate_fee_base_system_contracts: BaseSystemContracts, ) -> Self { let rate_limiter = config .api @@ -87,6 +105,13 @@ impl TxSender { ) }); + let proxy = config + .api + .web3_json_rpc + .main_node_url + .as_ref() + .map(|url| TxProxy::new(url)); + Self(Arc::new(TxSenderInner { chain_id: L2ChainId(config.chain.eth.zksync_network_id), master_connection_pool, @@ -98,6 +123,9 @@ impl TxSender { rate_limiter, gas_adjuster, state_keeper_config: config.chain.state_keeper.clone(), + playground_base_system_contracts, + estimate_fee_base_system_contracts, + proxy, })) } @@ -128,7 +156,8 @@ impl TxSender { ); return Err(SubmitTxError::GasLimitIsTooBig); } - if tx.common_data.fee.max_fee_per_gas < FAIR_L2_GAS_PRICE.into() { + if tx.common_data.fee.max_fee_per_gas < self.0.state_keeper_config.fair_l2_gas_price.into() + { vlog::info!( "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}", tx.hash(), @@ -151,6 +180,25 @@ impl TxSender { )); } + let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); + + let (_, 
gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( + l1_gas_price, + self.0.state_keeper_config.fair_l2_gas_price, + ); + + let intrinsic_constants = get_intrinsic_constants(); + if tx.common_data.fee.gas_limit + < U256::from(intrinsic_constants.l2_tx_intrinsic_gas) + + U256::from(intrinsic_constants.l2_tx_intrinsic_pubdata) + * min( + U256::from(gas_per_pubdata_byte), + tx.common_data.fee.gas_per_pubdata_limit, + ) + { + return Err(SubmitTxError::IntrinsicGas); + } + // We still double-check the nonce manually // to make sure that only the correct nonce is submitted and the transaction's hashes never repeat self.validate_account_nonce(&tx)?; @@ -163,18 +211,19 @@ impl TxSender { stage_started_at = Instant::now(); let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); - let fair_l2_gas_price = FAIR_L2_GAS_PRICE; + let fair_l2_gas_price = self.0.state_keeper_config.fair_l2_gas_price; let (tx_metrics, _) = execute_tx_with_pending_state( &self.0.replica_connection_pool, - tx.clone(), + tx.clone().into(), AccountTreeId::new(self.0.fee_account_addr), TxExecutionMode::EthCall, Some(tx.nonce()), U256::zero(), l1_gas_price, - FAIR_L2_GAS_PRICE, + fair_l2_gas_price, Some(tx.common_data.fee.max_fee_per_gas.as_u64()), + &self.0.playground_base_system_contracts, ); vlog::info!( @@ -195,6 +244,10 @@ impl TxSender { l1_gas_price, fair_l2_gas_price, Some(tx.common_data.fee.max_fee_per_gas.as_u64()), + &self.0.playground_base_system_contracts, + self.0 + .state_keeper_config + .validation_computational_gas_limit, ); metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "3_verify_execute"); @@ -204,7 +257,16 @@ impl TxSender { return Err(err.into()); } - self.ensure_tx_executable(&tx, &tx_metrics, true)?; + self.ensure_tx_executable(&tx.clone().into(), &tx_metrics, true)?; + + if let Some(proxy) = &self.0.proxy { + // We're running an external node: we have to proxy the transaction to the main node. 
+ proxy.submit_tx(&tx)?; + proxy.save_tx(tx.hash(), tx); + metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "4_tx_proxy"); + metrics::counter!("server.processed_txs", 1, "stage" => "proxied"); + return Ok(L2TxSubmissionResult::Proxied); + } let nonce = tx.common_data.nonce.0; let hash = tx.hash(); @@ -294,21 +356,13 @@ impl TxSender { return Ok(()); } - let eth_balance_key = storage_key_for_eth_balance(&tx.common_data.initiator_address); - - let balance = self - .0 - .replica_connection_pool - .access_storage_blocking() - .storage_dal() - .get_by_key(ð_balance_key) - .unwrap_or_default(); - let balance = h256_to_u256(balance); + let balance = self.get_balance(&tx.common_data.initiator_address); // Estimate the minimum fee price user will agree to. let gas_price = std::cmp::min( tx.common_data.fee.max_fee_per_gas, - U256::from(FAIR_L2_GAS_PRICE) + tx.common_data.fee.max_priority_fee_per_gas, + U256::from(self.0.state_keeper_config.fair_l2_gas_price) + + tx.common_data.fee.max_priority_fee_per_gas, ); let max_fee = tx.common_data.fee.gas_limit * gas_price; let max_fee_and_value = max_fee + tx.execute.value; @@ -348,8 +402,11 @@ impl TxSender { } let l1_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); - let suggested_gas_price_per_pubdata = - derive_base_fee_and_gas_per_pubdata(l1_gas_price, FAIR_L2_GAS_PRICE).1 as u32; + let suggested_gas_price_per_pubdata = derive_base_fee_and_gas_per_pubdata( + l1_gas_price, + self.0.state_keeper_config.fair_l2_gas_price, + ) + .1 as u32; // If user provided gas per pubdata limit lower than currently suggested // by the server, the users' transaction will not be included in the blocks right away @@ -360,39 +417,91 @@ impl TxSender { Ok(result) } + fn get_balance(&self, initiator_address: &H160) -> U256 { + let eth_balance_key = storage_key_for_eth_balance(initiator_address); + + let balance = self + .0 + .replica_connection_pool + .access_storage_blocking() + .storage_dal() + 
.get_by_key(ð_balance_key) + .unwrap_or_default(); + + h256_to_u256(balance) + } + pub fn get_txs_fee_in_wei( &self, - mut tx: L2Tx, + mut tx: Transaction, estimated_fee_scale_factor: f64, acceptable_overestimation: u32, ) -> Result { let l1_gas_price = { let effective_gas_price = self.0.gas_adjuster.estimate_effective_gas_price(); - ((effective_gas_price as f64) * self.0.gas_price_scale_factor) as u64 + let current_l1_gas_price = + ((effective_gas_price as f64) * self.0.gas_price_scale_factor) as u64; + + // In order for execution to pass smoothly, we need to ensure that block's required gasPerPubdata will be + // <= to the one in the transaction itself. + adjust_l1_gas_price_for_tx( + current_l1_gas_price, + self.0.state_keeper_config.fair_l2_gas_price, + tx.gas_per_pubdata_byte_limit(), + ) }; - let (base_fee, gas_per_pubdata_byte) = - derive_base_fee_and_gas_per_pubdata(l1_gas_price, FAIR_L2_GAS_PRICE); + let (base_fee, gas_per_pubdata_byte) = { + let (current_base_fee, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( + l1_gas_price, + self.0.state_keeper_config.fair_l2_gas_price, + ); + let enforced_base_fee = std::cmp::min(tx.max_fee_per_gas().as_u64(), current_base_fee); + + (enforced_base_fee, gas_per_pubdata_byte) + }; + + let hashed_key = get_code_key(&tx.initiator_account()); + // if the default account does not have enough funds + // for transferring tx.value, without taking into account the fee, + // there is no sense to estimate the fee + let account_code_hash = self + .0 + .replica_connection_pool + .access_storage_blocking() + .storage_dal() + .get_by_key(&hashed_key) + .unwrap_or_default(); - // If no signature has been provided, we will use the correctly-formatted EOA - // dummy signature - if tx.common_data.signature.is_empty() { - tx.common_data.signature = vec![0u8; 65]; - tx.common_data.signature[64] = 27; + if !tx.is_l1() + && account_code_hash == H256::zero() + && tx.execute.value > self.get_balance(&tx.initiator_account()) + { + 
vlog::info!( + "fee estimation failed on validation step. + account: {} does not have enough funds for for transferring tx.value: {}.", + &tx.initiator_account(), + tx.execute.value + ); + return Err(SubmitTxError::InsufficientFundsForTransfer); } - /// The calculated transaction length below does not include the signature and most likely - /// most of the paymasterInput. We will assume that those take no more than 64 slots (2048 bytes) - /// in total. If they do, the user should provide manually a higher gasLimit. - /// - /// In the future, a more advanced protocol for getting the fee for transactions from custom accounts - /// will be used. - const TX_LENGTH_OVERHEAD: usize = 64; + // For L2 transactions we need a properly formatted signature + if let ExecuteTransactionCommon::L2(l2_common_data) = &mut tx.common_data { + if l2_common_data.signature.is_empty() { + l2_common_data.signature = vec![0u8; 65]; + l2_common_data.signature[64] = 27; + } - let _tx_encoded_len = tx.abi_encoding_len() + TX_LENGTH_OVERHEAD; + l2_common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); + } // We already know how many gas is needed to cover for the publishing of the bytecodes. - let gas_for_bytecodes_pubdata = { + // For L1->L2 transactions all the bytecodes have been made available on L1, so no funds need to be + // spent on re-publishing those. 
+ let gas_for_bytecodes_pubdata = if tx.is_l1() { + 0 + } else { let pubdata_for_factory_deps = get_pubdata_for_factory_deps( &self.0.replica_connection_pool, &tx.execute.factory_deps, @@ -410,27 +519,54 @@ impl TxSender { let mut lower_bound = 0; let mut upper_bound = MAX_L2_TX_GAS_LIMIT as u32; - tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); - // Given the gas_limit to be used for the body of the transaction, // returns the result for executing the transaction with such gas_limit let mut execute = |tx_gas_limit: u32| { - let gas_limit_with_overhead = tx_gas_limit; + let gas_limit_with_overhead = tx_gas_limit + + derive_overhead( + tx_gas_limit, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + ); + + match &mut tx.common_data { + ExecuteTransactionCommon::L1(l1_common_data) => { + l1_common_data.gas_limit = gas_limit_with_overhead.into(); + + let required_funds = l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + + tx.execute.value; + + l1_common_data.to_mint = required_funds; + } + ExecuteTransactionCommon::L2(l2_common_data) => { + l2_common_data.fee.gas_limit = gas_limit_with_overhead.into(); + } + } - tx.common_data.fee.gas_limit = gas_limit_with_overhead.into(); - let initial_paid_fee = - U256::from(gas_limit_with_overhead) * tx.common_data.fee.max_fee_per_gas; + let enforced_nonce = match &tx.common_data { + ExecuteTransactionCommon::L2(data) => Some(data.nonce), + _ => None, + }; + + // For L2 transactions we need to explicitly put enough balance into the account of the users + // while for L1->L2 transactions the `to_mint` field plays this role + let added_balance = match &tx.common_data { + ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, + _ => U256::zero(), + }; let (tx_metrics, exec_result) = execute_tx_with_pending_state( &self.0.replica_connection_pool, tx.clone(), AccountTreeId::new(self.0.fee_account_addr), 
TxExecutionMode::EstimateFee, - Some(tx.nonce()), - initial_paid_fee, + enforced_nonce, + added_balance, l1_gas_price, - FAIR_L2_GAS_PRICE, - Some(tx.common_data.fee.max_fee_per_gas.as_u64()), + self.0.state_keeper_config.fair_l2_gas_price, + Some(base_fee), + &self.0.estimate_fee_base_system_contracts, ); self.ensure_tx_executable(&tx, &tx_metrics, false) @@ -474,7 +610,12 @@ impl TxSender { match execute(tx_body_gas_limit + gas_for_bytecodes_pubdata) { Err(err) => Err(err.into()), Ok(_) => { - let overhead = 0; + let overhead = derive_overhead( + tx_body_gas_limit + gas_for_bytecodes_pubdata, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + ); let full_gas_limit = match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) { @@ -511,20 +652,18 @@ impl TxSender { derive_base_fee_and_gas_per_pubdata( (gas_price as f64 * self.0.gas_price_scale_factor).round() as u64, - FAIR_L2_GAS_PRICE, + self.0.state_keeper_config.fair_l2_gas_price, ) .0 } fn ensure_tx_executable( &self, - transaction: &L2Tx, + transaction: &Transaction, tx_metrics: &TransactionExecutionMetrics, log_message: bool, ) -> Result<(), SubmitTxError> { let execution_metrics = ExecutionMetrics { - initial_storage_writes: tx_metrics.initial_storage_writes, - repeated_storage_writes: tx_metrics.repeated_storage_writes, published_bytecode_bytes: tx_metrics.published_bytecode_bytes, l2_l1_long_messages: tx_metrics.l2_l1_long_messages, l2_l1_logs: tx_metrics.l2_l1_logs, @@ -536,10 +675,16 @@ impl TxSender { total_log_queries: tx_metrics.total_log_queries, cycles_used: tx_metrics.cycles_used, }; + let writes_metrics = DeduplicatedWritesMetrics { + initial_storage_writes: tx_metrics.initial_storage_writes, + repeated_storage_writes: tx_metrics.repeated_storage_writes, + }; // In api server it's ok to expect that all writes are initial it's safer - let tx_gas_count = - gas_count_from_tx_and_metrics(&transaction.clone().into(), 
&execution_metrics); + let tx_gas_count = gas_count_from_tx_and_metrics(&transaction.clone(), &execution_metrics) + + gas_count_from_writes(&writes_metrics); + let tx_data: TransactionData = transaction.clone().into(); + let tx_encoding_size = tx_data.into_tokens().len(); for sealer in &SealManager::get_default_sealers() { let seal_resolution = sealer.should_seal( @@ -550,6 +695,10 @@ impl TxSender { execution_metrics, tx_gas_count, tx_gas_count, + tx_encoding_size, + tx_encoding_size, + writes_metrics, + writes_metrics, ); if matches!(seal_resolution, SealResolution::Unexecutable(_)) { let message = format!( diff --git a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs new file mode 100644 index 000000000000..eb7fafe0f829 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs @@ -0,0 +1,60 @@ +use std::collections::HashMap; +use std::sync::RwLock; + +use zksync_types::{ + api::{BlockId, Transaction, TransactionId}, + l2::L2Tx, + H256, +}; +use zksync_web3_decl::{ + jsonrpsee::core::Error as JsonrpseeError, + jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + namespaces::EthNamespaceClient, +}; + +/// Used by external node to proxy transaction to the main node +/// and store them while they're not synced back yet +pub struct TxProxy { + tx_cache: RwLock>, + client: HttpClient, +} + +impl TxProxy { + pub fn new(main_node_url: &str) -> Self { + Self { + client: HttpClientBuilder::default() + .build(main_node_url) + .expect("Failed to create HTTP client"), + tx_cache: RwLock::new(HashMap::new()), + } + } + + pub fn find_tx(&self, tx_hash: H256) -> Option { + self.tx_cache.read().unwrap().get(&tx_hash).cloned() + } + + pub fn forget_tx(&self, tx_hash: H256) { + self.tx_cache.write().unwrap().remove(&tx_hash); + } + + pub fn save_tx(&self, tx_hash: H256, tx: L2Tx) { + self.tx_cache.write().unwrap().insert(tx_hash, tx); + } + + pub fn submit_tx(&self, tx: &L2Tx) -> Result { + let 
raw_tx = zksync_types::Bytes(tx.common_data.input_data().expect("raw tx is absent")); + async_std::task::block_on(self.client.send_raw_transaction(raw_tx)) + } + + pub fn request_tx(&self, id: TransactionId) -> Result, JsonrpseeError> { + async_std::task::block_on(match id { + TransactionId::Block(BlockId::Hash(block), index) => self + .client + .get_transaction_by_block_hash_and_index(block, index), + TransactionId::Block(BlockId::Number(block), index) => self + .client + .get_transaction_by_block_number_and_index(block, index), + TransactionId::Hash(hash) => self.client.get_transaction_by_hash(hash), + }) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index e5c198af08a6..2361ab1872a6 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -9,7 +9,7 @@ use jsonrpc_derive::rpc; // Workspace uses use zksync_types::{ api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails}, - explorer_api::BlockDetails, + explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, transaction_request::CallRequest, vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, @@ -27,6 +27,9 @@ pub trait ZksNamespaceT { #[rpc(name = "zks_estimateFee", returns = "Fee")] fn estimate_fee(&self, req: CallRequest) -> Result; + #[rpc(name = "zks_estimateGasL1ToL2", returns = "U256")] + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> Result; + #[rpc(name = "zks_getMainContract", returns = "Address")] fn get_main_contract(&self) -> Result
; @@ -97,6 +100,18 @@ pub trait ZksNamespaceT { returns = "Option" )] fn get_transaction_details(&self, hash: H256) -> Result>; + + #[rpc( + name = "zks_getRawBlockTransactions", + returns = "Vec" + )] + fn get_raw_block_transactions( + &self, + block_number: MiniblockNumber, + ) -> Result>; + + #[rpc(name = "zks_getL1BatchDetails", returns = "Option")] + fn get_l1_batch_details(&self, batch: L1BatchNumber) -> Result>; } impl ZksNamespaceT for ZksNamespace { @@ -104,6 +119,11 @@ impl ZksNamespaceT for ZksNamespace { self.estimate_fee_impl(req).map_err(into_jsrpc_error) } + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> Result { + self.estimate_l1_to_l2_gas_impl(req) + .map_err(into_jsrpc_error) + } + fn get_main_contract(&self) -> Result
{ Ok(self.get_main_contract_impl()) } @@ -197,4 +217,17 @@ impl ZksNamespaceT for ZksNamespace { #[cfg(not(feature = "openzeppelin_tests"))] Err(into_jsrpc_error(Web3Error::NotImplemented)) } + + fn get_raw_block_transactions( + &self, + block_number: MiniblockNumber, + ) -> Result> { + self.get_raw_block_transactions_impl(block_number) + .map_err(into_jsrpc_error) + } + + fn get_l1_batch_details(&self, batch: L1BatchNumber) -> Result> { + self.get_l1_batch_details_impl(batch) + .map_err(into_jsrpc_error) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index 69a6508cd77f..372825c2a5a0 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -3,7 +3,7 @@ use bigdecimal::BigDecimal; use std::collections::HashMap; use zksync_types::{ api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails, U64}, - explorer_api::BlockDetails, + explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, transaction_request::CallRequest, vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, @@ -21,6 +21,11 @@ impl ZksNamespaceServer for ZksNamespace { .map_err(|err| CallError::from_std_error(err).into()) } + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult { + self.estimate_l1_to_l2_gas_impl(req) + .map_err(|err| CallError::from_std_error(err).into()) + } + fn get_main_contract(&self) -> RpcResult
{ Ok(self.get_main_contract_impl()) } @@ -110,4 +115,20 @@ impl ZksNamespaceServer for ZksNamespace { self.get_transaction_details_impl(hash) .map_err(|err| CallError::from_std_error(err).into()) } + + fn get_raw_block_transactions( + &self, + block_number: MiniblockNumber, + ) -> RpcResult> { + self.get_raw_block_transactions_impl(block_number) + .map_err(|err| CallError::from_std_error(err).into()) + } + + fn get_l1_batch_details( + &self, + batch_number: L1BatchNumber, + ) -> RpcResult> { + self.get_l1_batch_details_impl(batch_number) + .map_err(|err| CallError::from_std_error(err).into()) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/mod.rs b/core/bin/zksync_core/src/api_server/web3/mod.rs index e06c3705ac0a..06eec3e4f741 100644 --- a/core/bin/zksync_core/src/api_server/web3/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/mod.rs @@ -33,6 +33,7 @@ use backend_jsonrpc::{ use namespaces::{EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, ZksNamespace}; use pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; use state::{Filters, RpcState}; +use zksync_contracts::{ESTIMATE_FEE_BLOCK_CODE, PLAYGROUND_BLOCK_BOOTLOADER_CODE}; pub mod backend_jsonrpc; pub mod backend_jsonrpsee; @@ -56,11 +57,26 @@ impl RpcState { gas_adjuster: Arc>, ) -> Self { let config = get_config(); + let mut storage = replica_connection_pool.access_storage_blocking(); + + let base_system_contracts = storage.storage_dal().get_base_system_contracts( + config.chain.state_keeper.bootloader_hash, + config.chain.state_keeper.default_aa_hash, + ); + + let mut playground_base_system_contracts = base_system_contracts.clone(); + let mut estimate_fee_base_system_contracts = base_system_contracts; + playground_base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); + estimate_fee_base_system_contracts.bootloader = ESTIMATE_FEE_BLOCK_CODE.clone(); + + drop(storage); let tx_sender = TxSender::new( config, master_connection_pool, 
replica_connection_pool.clone(), gas_adjuster, + playground_base_system_contracts, + estimate_fee_base_system_contracts, ); let accounts = if cfg!(feature = "openzeppelin_tests") { diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs index 4db3339f8ce9..95954524e860 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,4 +1,3 @@ -use std::convert::TryInto; use std::time::Instant; use itertools::Itertools; @@ -9,11 +8,11 @@ use zksync_types::{ TransactionVariant, }, l2::{L2Tx, TransactionType}, - transaction_request::CallRequest, + transaction_request::{l2_tx_from_call_req, CallRequest}, utils::decompose_full_nonce, web3::types::SyncState, - AccountTreeId, Bytes, L2ChainId, MiniblockNumber, StorageKey, FAIR_L2_GAS_PRICE, H256, - L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, U256, + AccountTreeId, Bytes, L2ChainId, MiniblockNumber, StorageKey, H256, L2_ETH_TOKEN_ADDRESS, + MAX_GAS_PER_PUBDATA_BYTE, U256, }; use zksync_web3_decl::{ @@ -21,9 +20,10 @@ use zksync_web3_decl::{ types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64}, }; -use crate::api_server::execution_sandbox::execute_tx_eth_call; -use crate::api_server::web3::backend_jsonrpc::error::internal_error; -use crate::api_server::web3::state::RpcState; +use crate::api_server::{ + execution_sandbox::execute_tx_eth_call, web3::backend_jsonrpc::error::internal_error, + web3::state::RpcState, +}; use zksync_utils::u256_to_h256; @@ -80,13 +80,15 @@ impl EthNamespace { let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); #[cfg(not(feature = "openzeppelin_tests"))] - let tx: L2Tx = request.try_into().map_err(Web3Error::SerializationError)?; + let tx = l2_tx_from_call_req(request, self.state.config.api.web3_json_rpc.max_tx_size)?; #[cfg(feature = "openzeppelin_tests")] let tx: L2Tx = self - 
.convert_evm_like_deploy_requests(request.into())? - .try_into() - .map_err(Web3Error::SerializationError)?; + .convert_evm_like_deploy_requests(tx_req_from_call_req( + request, + self.state.config.api.web3_json_rpc.max_tx_size, + )?)? + .try_into()?; let enforced_base_fee = Some(tx.common_data.fee.max_fee_per_gas.as_u64()); let result = execute_tx_eth_call( @@ -98,8 +100,9 @@ impl EthNamespace { .0 .gas_adjuster .estimate_effective_gas_price(), - FAIR_L2_GAS_PRICE, + self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price, enforced_base_fee, + &self.state.tx_sender.0.playground_base_system_contracts, )?; let mut res_bytes = match result.revert_reason { @@ -136,13 +139,16 @@ impl EthNamespace { let is_eip712 = request.eip712_meta.is_some(); #[cfg(not(feature = "openzeppelin_tests"))] - let mut tx: L2Tx = request.try_into().map_err(Web3Error::SerializationError)?; + let mut tx: L2Tx = + l2_tx_from_call_req(request, self.state.config.api.web3_json_rpc.max_tx_size)?; #[cfg(feature = "openzeppelin_tests")] let mut tx: L2Tx = self - .convert_evm_like_deploy_requests(request.into())? - .try_into() - .map_err(Web3Error::SerializationError)?; + .convert_evm_like_deploy_requests(tx_req_from_call_req( + request, + self.state.config.api.web3_json_rpc.max_tx_size, + )?)? + .try_into()?; // The user may not include the proper transaction type during the estimation of // the gas fee. However, it is needed for the bootloader checks to pass properly. 
@@ -174,7 +180,7 @@ impl EthNamespace { let fee = self .state .tx_sender - .get_txs_fee_in_wei(tx, scale_factor, acceptable_overestimation) + .get_txs_fee_in_wei(tx.into(), scale_factor, acceptable_overestimation) .map_err(|err| Web3Error::SubmitTransactionError(err.to_string()))?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas"); @@ -410,7 +416,7 @@ impl EthNamespace { let start = Instant::now(); let endpoint_name = "get_transaction"; - let transaction = self + let mut transaction = self .state .connection_pool .access_storage_blocking() @@ -418,6 +424,28 @@ impl EthNamespace { .get_transaction(id, L2ChainId(self.state.config.chain.eth.zksync_network_id)) .map_err(|err| internal_error(endpoint_name, err)); + if let Some(proxy) = &self.state.tx_sender.0.proxy { + // We're running an external node - check the proxy cache in + // case the transaction was proxied but not yet synced back to us + if let Ok(Some(tx)) = &transaction { + // If the transaction is already in the db, remove it from cache + proxy.forget_tx(tx.hash) + } else { + if let TransactionId::Hash(hash) = id { + // If the transaction is not in the db, check the cache + if let Some(tx) = proxy.find_tx(hash) { + transaction = Ok(Some(tx.into())); + } + } + if !matches!(transaction, Ok(Some(_))) { + // If the transaction is not in the db or cache, query main node + transaction = proxy + .request_tx(id) + .map_err(|err| internal_error(endpoint_name, err)); + } + } + } + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); transaction } @@ -704,14 +732,15 @@ impl EthNamespace { }; let mut eip712_meta = Eip712Meta::default(); eip712_meta.gas_per_pubdata = U256::from(MAX_GAS_PER_PUBDATA_BYTE); + let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; let transaction_request = TransactionRequest { nonce, from: Some(transaction_request.from), to: transaction_request.to, value: 
transaction_request.value.unwrap_or(U256::from(0)), - gas_price: U256::from(FAIR_L2_GAS_PRICE), + gas_price: U256::from(fair_l2_gas_price), gas: transaction_request.gas.unwrap(), - max_priority_fee_per_gas: Some(U256::from(FAIR_L2_GAS_PRICE)), + max_priority_fee_per_gas: Some(U256::from(fair_l2_gas_price)), input: transaction_request.data.unwrap_or_default(), v: None, r: None, diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs index 24dc0bbb122f..6ac4765c437c 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,21 +1,20 @@ use bigdecimal::{BigDecimal, Zero}; -use core::convert::TryInto; -use std::collections::HashMap; use std::time::Instant; +use std::{collections::HashMap, convert::TryInto}; use zksync_mini_merkle_tree::mini_merkle_tree_proof; use zksync_types::{ api::{BridgeAddresses, GetLogsFilter, L2ToL1LogProof, TransactionDetails, U64}, commitment::CommitmentSerializable, - explorer_api::BlockDetails, + explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, - l2::L2Tx, + l1::L1Tx, l2_to_l1_log::L2ToL1Log, tokens::ETHEREUM_ADDRESS, - transaction_request::CallRequest, + transaction_request::{l2_tx_from_call_req, CallRequest}, vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, - L1BatchNumber, MiniblockNumber, FAIR_L2_GAS_PRICE, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, - MAX_GAS_PER_PUBDATA_BYTE, U256, + L1BatchNumber, MiniblockNumber, Transaction, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, + MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, }; use zksync_utils::address_to_h256; use zksync_web3_decl::{ @@ -43,14 +42,42 @@ impl ZksNamespace { pub fn estimate_fee_impl(&self, request: CallRequest) -> Result { let start = Instant::now(); - let mut tx: L2Tx = request.try_into().map_err(Web3Error::SerializationError)?; + let mut tx = l2_tx_from_call_req(request, 
self.state.config.api.web3_json_rpc.max_tx_size)?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. - tx.common_data.fee.max_fee_per_gas = FAIR_L2_GAS_PRICE.into(); - tx.common_data.fee.max_priority_fee_per_gas = FAIR_L2_GAS_PRICE.into(); + let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; + tx.common_data.fee.max_fee_per_gas = fair_l2_gas_price.into(); + tx.common_data.fee.max_priority_fee_per_gas = fair_l2_gas_price.into(); tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); + let fee = self.estimate_fee(tx.into())?; + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_fee"); + Ok(fee) + } + + #[tracing::instrument(skip(self, request))] + pub fn estimate_l1_to_l2_gas_impl(&self, request: CallRequest) -> Result { + let start = Instant::now(); + + let mut tx: L1Tx = request.try_into().map_err(Web3Error::SerializationError)?; + + // When we're estimating fee, we are trying to deduce values related to fee, so we should + // not consider provided ones. 
+ let fair_l2_gas_price = self.state.tx_sender.0.state_keeper_config.fair_l2_gas_price; + tx.common_data.max_fee_per_gas = fair_l2_gas_price.into(); + if tx.common_data.gas_per_pubdata_limit == U256::zero() { + tx.common_data.gas_per_pubdata_limit = REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(); + } + + let fee = self.estimate_fee(tx.into())?; + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas_l1_to_l2"); + Ok(fee.gas_limit) + } + + fn estimate_fee(&self, tx: Transaction) -> Result { let scale_factor = self .state .config @@ -70,7 +97,6 @@ impl ZksNamespace { .get_txs_fee_in_wei(tx, scale_factor, acceptable_overestimation) .map_err(|err| Web3Error::SubmitTransactionError(err.to_string()))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_fee"); Ok(fee) } @@ -454,6 +480,27 @@ impl ZksNamespace { block_details } + #[tracing::instrument(skip(self))] + pub fn get_raw_block_transactions_impl( + &self, + block_number: MiniblockNumber, + ) -> Result, Web3Error> { + let start = Instant::now(); + let endpoint_name = "get_raw_block_transactions"; + + let transactions = self + .state + .connection_pool + .access_storage_blocking() + .transactions_web3_dal() + .get_raw_miniblock_transactions(block_number) + .map_err(|err| internal_error(endpoint_name, err)); + + metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + + transactions + } + #[tracing::instrument(skip(self))] pub fn get_transaction_details_impl( &self, @@ -475,6 +522,28 @@ impl ZksNamespace { tx_details } + #[tracing::instrument(skip(self))] + pub fn get_l1_batch_details_impl( + &self, + batch_number: L1BatchNumber, + ) -> Result, Web3Error> { + let start = Instant::now(); + let endpoint_name = "get_l1_batch"; + + let l1_batch = self + .state + .connection_pool + .access_storage_blocking() + .explorer() + .blocks_dal() + .get_l1_batch_details(batch_number) + .map_err(|err| internal_error(endpoint_name, err)); + + 
metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + + l1_batch + } + #[cfg(feature = "openzeppelin_tests")] /// Saves contract bytecode to memory. pub fn set_known_bytecode_impl(&self, bytecode: Bytes) -> bool { diff --git a/core/bin/zksync_core/src/api_server/web3/state.rs b/core/bin/zksync_core/src/api_server/web3/state.rs index 58a1c73d655d..020e1299d62c 100644 --- a/core/bin/zksync_core/src/api_server/web3/state.rs +++ b/core/bin/zksync_core/src/api_server/web3/state.rs @@ -35,7 +35,11 @@ pub struct RpcState { impl RpcState { pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.config.chain.eth.zksync_network_id; - let (tx_request, hash) = TransactionRequest::from_bytes(bytes, chain_id)?; + let (tx_request, hash) = TransactionRequest::from_bytes( + bytes, + chain_id, + self.config.api.web3_json_rpc.max_tx_size, + )?; Ok((tx_request.try_into()?, hash)) } diff --git a/core/bin/zksync_core/src/bin/block_reverter.rs b/core/bin/zksync_core/src/bin/block_reverter.rs index 6cc55db8c720..937954ba832b 100644 --- a/core/bin/zksync_core/src/bin/block_reverter.rs +++ b/core/bin/zksync_core/src/bin/block_reverter.rs @@ -270,7 +270,7 @@ impl BlockReverter { let signed_tx = eth_gateway .sign_prepared_tx_for_addr( raw_tx.clone(), - self.config.contracts.diamond_proxy_addr, + self.config.contracts.validator_timelock_addr, Options::with(|opt| { opt.gas = Some(5_000_000.into()); opt.max_priority_fee_per_gas = Some(priority_fee_per_gas); @@ -422,7 +422,7 @@ enum Opt { #[tokio::main] async fn main() -> anyhow::Result<()> { - vlog::init(); + let _sentry_guard = vlog::init(); let config = ZkSyncConfig::from_env(); let connection_pool = ConnectionPool::new(None, true); let mut block_reverter = BlockReverter { diff --git a/core/bin/zksync_core/src/bin/en_playground.rs b/core/bin/zksync_core/src/bin/en_playground.rs new file mode 100644 index 000000000000..b1450c03208e --- /dev/null +++ 
b/core/bin/zksync_core/src/bin/en_playground.rs @@ -0,0 +1,66 @@ +//! This file is a playground binary for the External Node development. +//! It's temporary and once a PoC is ready, this file will be replaced by the real EN entrypoint. +use zksync_config::ZkSyncConfig; +use zksync_core::{ + state_keeper::{seal_criteria::SealManager, ZkSyncStateKeeper}, + sync_layer::{ + batch_status_updater::run_batch_status_updater, external_io::ExternalIO, + fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, + mock_batch_executor::MockBatchExecutorBuilder, ActionQueue, ExternalNodeSealer, + }, +}; +use zksync_dal::ConnectionPool; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let _sentry_guard = vlog::init(); + let connection_pool = ConnectionPool::new(None, true); + let config = ZkSyncConfig::from_env(); + + vlog::info!("Started the EN playground"); + + perform_genesis_if_needed(&mut connection_pool.access_storage().await, &config).await; + + let action_queue = ActionQueue::new(); + let en_sealer = ExternalNodeSealer::new(action_queue.clone()); + let sealer = SealManager::custom( + config.chain.state_keeper.clone(), + Vec::new(), + en_sealer.clone().into_unconditional_batch_seal_criterion(), + en_sealer.clone().into_miniblock_seal_criterion(), + ); + + let mock_batch_executor_base = Box::new(MockBatchExecutorBuilder); + + let io = Box::new(ExternalIO::new(Address::default(), action_queue.clone())); + let (_stop_sender, stop_receiver) = tokio::sync::watch::channel::(false); + + let state_keeper = ZkSyncStateKeeper::new(stop_receiver, io, mock_batch_executor_base, sealer); + + // Different envs for the ease of local testing. 
+ // Localhost + // let main_node_url = std::env::var("API_WEB3_JSON_RPC_HTTP_URL").unwrap(); + // Stage + // let main_node_url = "https://z2-dev-api.zksync.dev:443"; + // Testnet + // let main_node_url = "https://zksync2-testnet.zksync.dev:443"; + // Mainnet (doesn't work yet) + // let main_node_url = "https://zksync2-mainnet.zksync.io:443"; + + let fetcher = MainNodeFetcher::new( + &config.api.web3_json_rpc.main_node_url.unwrap(), + L1BatchNumber(0), + MiniblockNumber(1), + L1BatchNumber(0), + L1BatchNumber(0), + L1BatchNumber(0), + action_queue.clone(), + ); + + let _updater_handle = std::thread::spawn(move || run_batch_status_updater(action_queue)); + let _sk_handle = tokio::task::spawn_blocking(|| state_keeper.run()); + fetcher.run().await; + + Ok(()) +} diff --git a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs index ff6c1708bc58..b84549eceab2 100644 --- a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs +++ b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs @@ -4,7 +4,7 @@ use zksync_storage::db::Database; use zksync_storage::RocksDB; fn main() { - vlog::init(); + let _sentry_guard = vlog::init(); let config = ZkSyncConfig::from_env(); let db = RocksDB::new(Database::MerkleTree, config.db.path(), true); let tree = ZkSyncTree::new(db); diff --git a/core/bin/zksync_core/src/bin/zksync_server.rs b/core/bin/zksync_core/src/bin/zksync_server.rs index 87e6410eb520..1fc8cee2ea89 100644 --- a/core/bin/zksync_core/src/bin/zksync_server.rs +++ b/core/bin/zksync_core/src/bin/zksync_server.rs @@ -27,7 +27,7 @@ struct Opt { /// comma-separated list of components to launch #[structopt( long, - default_value = "api,tree,tree_lightweight,eth,data_fetcher,state_keeper,witness_generator" + default_value = "api,tree,tree_lightweight,eth,data_fetcher,state_keeper,witness_generator,housekeeper" )] components: ComponentsToRun, } diff --git 
a/core/bin/zksync_core/src/eth_sender/aggregator.rs b/core/bin/zksync_core/src/eth_sender/aggregator.rs index 7094da1b40d0..7cdd357eb3f0 100644 --- a/core/bin/zksync_core/src/eth_sender/aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/aggregator.rs @@ -3,6 +3,7 @@ use crate::eth_sender::block_publish_criterion::{ TimestampDeadlineCriterion, }; use zksync_config::configs::eth_sender::{ProofSendingMode, SenderConfig}; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; use zksync_types::aggregated_operations::{ AggregatedActionType, AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, @@ -81,6 +82,7 @@ impl Aggregator { pub async fn get_next_ready_operation( &mut self, storage: &mut StorageProcessor<'_>, + base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Option { let last_sealed_block_number = storage.blocks_dal().get_sealed_block_number(); if let Some(op) = self @@ -106,6 +108,7 @@ impl Aggregator { storage, self.config.max_aggregated_blocks_to_commit as usize, last_sealed_block_number, + base_system_contracts_hashes, ) .await .map(AggregatedOperation::CommitBlocks) @@ -118,7 +121,10 @@ impl Aggregator { limit: usize, last_sealed_block: L1BatchNumber, ) -> Option { - let ready_for_execute_blocks = storage.blocks_dal().get_ready_for_execute_blocks(limit); + let ready_for_execute_blocks = storage.blocks_dal().get_ready_for_execute_blocks( + limit, + self.config.l1_batch_min_age_before_execute_seconds, + ); let blocks = extract_ready_subrange( storage, &mut self.execute_criterion, @@ -135,11 +141,28 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, limit: usize, last_sealed_block: L1BatchNumber, + base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Option { let mut blocks_dal = storage.blocks_dal(); let last_block = blocks_dal.get_last_committed_to_eth_block()?; - let ready_for_commit_blocks = blocks_dal.get_ready_for_commit_blocks(limit); + + let ready_for_commit_blocks = 
blocks_dal.get_ready_for_commit_blocks( + limit, + base_system_contracts_hashes.bootloader, + base_system_contracts_hashes.default_aa, + ); + + // Check that the blocks that are selected are sequential + ready_for_commit_blocks + .iter() + .reduce(|last_block, next_block| { + if last_block.header.number + 1 == next_block.header.number { + next_block + } else { + panic!("Blocks are not sequential") + } + }); let blocks = extract_ready_subrange( storage, diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 67ae2a3702be..1cc0d6990927 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -5,10 +5,10 @@ use crate::gas_tracker::agg_block_base_cost; use std::cmp::max; use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::aggregated_operations::AggregatedOperation; -use zksync_types::eth_sender::EthTx; -use zksync_types::Address; +use zksync_eth_client::clients::http_client::EthereumClient; +use zksync_types::{aggregated_operations::AggregatedOperation, eth_sender::EthTx, Address, H256}; /// The component is responsible for aggregating l1 batches into eth_txs: /// Such as CommitBlocks, PublishProofBlocksOnchain and ExecuteBlock @@ -39,8 +39,17 @@ impl EthTxAggregator { } } - pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver) { + pub async fn run( + mut self, + pool: ConnectionPool, + eth_client: EthereumClient, + stop_receiver: watch::Receiver, + ) { loop { + let base_system_contracts_hashes = self + .get_l1_base_system_contracts_hashes(ð_client) + .await + .unwrap(); let mut storage = pool.access_storage().await; if *stop_receiver.borrow() { @@ -48,22 +57,59 @@ impl EthTxAggregator { break; } - if let Err(e) = self.loop_iteration(&mut 
storage).await { + if let Err(e) = self + .loop_iteration(&mut storage, base_system_contracts_hashes) + .await + { // Web3 API request failures can cause this, // and anything more important is already properly reported. vlog::warn!("eth_sender error {:?}", e); } - tokio::time::sleep(self.config.tx_poll_period()).await; + tokio::time::sleep(self.config.aggregate_tx_poll_period()).await; } } - #[tracing::instrument(skip(self, storage))] + async fn get_l1_base_system_contracts_hashes( + &mut self, + eth_client: &EthereumClient, + ) -> Result { + let bootloader_code_hash: H256 = eth_client + .call_main_contract_function( + "getL2BootloaderBytecodeHash", + (), + None, + Default::default(), + None, + ) + .await?; + + let default_account_code_hash: H256 = eth_client + .call_main_contract_function( + "getL2DefaultAccountBytecodeHash", + (), + None, + Default::default(), + None, + ) + .await?; + Ok(BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + }) + } + + #[tracing::instrument(skip(self, storage, base_system_contracts_hashes))] async fn loop_iteration( &mut self, storage: &mut StorageProcessor<'_>, + base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Result<(), ETHSenderError> { - if let Some(agg_op) = self.aggregator.get_next_ready_operation(storage).await { + if let Some(agg_op) = self + .aggregator + .get_next_ready_operation(storage, base_system_contracts_hashes) + .await + { let tx = self.save_eth_tx(storage, &agg_op).await?; Self::log_eth_tx_saving(storage, agg_op, &tx).await; } diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs index 60457df09bdc..3f9843a83fca 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -166,8 +166,8 @@ impl EthTxManager { return Err(ETHSenderError::from(Error::from(Web3Error::Internal))); } - // Increase 
`priority_fee_per_gas` by at least 10% to prevent "replacement transaction underpriced" error. - Ok((previous_priority_fee + (previous_priority_fee / 10) + 1) + // Increase `priority_fee_per_gas` by at least 20% to prevent "replacement transaction underpriced" error. + Ok((previous_priority_fee + (previous_priority_fee / 5) + 1) .max(self.gas_adjuster.get_priority_fee())) } diff --git a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs index 904bdcd88640..d3fe029bf230 100644 --- a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs +++ b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs @@ -15,11 +15,6 @@ pub fn track_eth_tx_metrics(connection: &mut StorageProcessor<'_>, l1_stage: &st return; } - metrics::gauge!( - "server.block_number", - blocks.last().unwrap().number.0 as f64, - "stage" => stage.clone() - ); for block in blocks { metrics::histogram!( "server.block_latency", diff --git a/core/bin/zksync_core/src/eth_sender/tests.rs b/core/bin/zksync_core/src/eth_sender/tests.rs index e85dfa014775..2ec6d79fb640 100644 --- a/core/bin/zksync_core/src/eth_sender/tests.rs +++ b/core/bin/zksync_core/src/eth_sender/tests.rs @@ -37,7 +37,7 @@ impl EthSenderTester { let eth_sender_config = ETHSenderConfig::from_env(); let aggregator_config = SenderConfig { aggregated_proof_sizes: vec![1], - ..eth_sender_config.sender + ..eth_sender_config.sender.clone() }; let gateway = Arc::new(MockEthereum::default().with_fee_history(history)); diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs index 78000624498d..57ff542ce2c7 100644 --- a/core/bin/zksync_core/src/eth_watch/tests.rs +++ b/core/bin/zksync_core/src/eth_watch/tests.rs @@ -125,6 +125,7 @@ fn build_tx(serial_id: u64, eth_block: u64) -> L1Tx { eth_hash: [2; 32].into(), eth_block, gas_limit: Default::default(), + max_fee_per_gas: Default::default(), gas_per_pubdata_limit: 1u32.into(), full_fee: 
Default::default(), layer_2_tip_fee: U256::from(10u8), diff --git a/core/bin/zksync_core/src/gas_adjuster/mod.rs b/core/bin/zksync_core/src/gas_adjuster/mod.rs index 351b419699f8..867861002eb0 100644 --- a/core/bin/zksync_core/src/gas_adjuster/mod.rs +++ b/core/bin/zksync_core/src/gas_adjuster/mod.rs @@ -5,7 +5,6 @@ use std::collections::VecDeque; use std::sync::{Arc, RwLock}; use tokio::sync::watch::Receiver; use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; -use zksync_types::FAIR_L2_GAS_PRICE; use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::http_client::Error, EthInterface}; @@ -25,7 +24,14 @@ pub struct GasAdjuster { impl GasAdjuster { pub async fn new(eth_client: E, config: GasAdjusterConfig) -> Result { - let current_block = eth_client.block_number("gas_adjuster").await?.as_usize(); + // Subtracting 1 from the "latest" block number to prevent errors in case + // the info about the latest block is not yet present on the node. + // This sometimes happens on Infura. + let current_block = eth_client + .block_number("gas_adjuster") + .await? + .as_usize() + .saturating_sub(1); let history = eth_client .base_fee_history(current_block, config.max_base_fee_samples, "gas_adjuster") .await?; @@ -39,6 +45,10 @@ impl GasAdjuster { /// Returns the sum of base and priority fee, in wei, not considering time in mempool. /// Can be used to get an estimate of current gas price. 
pub fn estimate_effective_gas_price(&self) -> u64 { + if let Some(price) = self.config.internal_enforced_l1_gas_price { + return price; + } + let effective_gas_price = self.get_base_fee(0) + self.get_priority_fee(); (self.config.internal_l1_pricing_multiplier * effective_gas_price as f64) as u64 @@ -120,11 +130,11 @@ impl GasAdjuster { Ok(()) } - pub fn l2_tx_filter(&self) -> L2TxFilter { + pub fn l2_tx_filter(&self, fair_l2_gas_price: u64) -> L2TxFilter { let effective_gas_price = self.estimate_effective_gas_price(); let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(effective_gas_price, FAIR_L2_GAS_PRICE); + derive_base_fee_and_gas_per_pubdata(effective_gas_price, fair_l2_gas_price); L2TxFilter { l1_gas_price: effective_gas_price, fee_per_gas: base_fee, diff --git a/core/bin/zksync_core/src/gas_adjuster/tests.rs b/core/bin/zksync_core/src/gas_adjuster/tests.rs index 40cf84f5d0dd..40b948a6ef9c 100644 --- a/core/bin/zksync_core/src/gas_adjuster/tests.rs +++ b/core/bin/zksync_core/src/gas_adjuster/tests.rs @@ -44,6 +44,7 @@ async fn kept_updated() { pricing_formula_parameter_a: 1.5, pricing_formula_parameter_b: 1.0005, internal_l1_pricing_multiplier: 0.8, + internal_enforced_l1_gas_price: None, poll_period: 5, }, ) diff --git a/core/bin/zksync_core/src/gas_tracker/mod.rs b/core/bin/zksync_core/src/gas_tracker/mod.rs index 5ec2a71b5489..2426a9dad96d 100644 --- a/core/bin/zksync_core/src/gas_tracker/mod.rs +++ b/core/bin/zksync_core/src/gas_tracker/mod.rs @@ -1,8 +1,11 @@ //! This module predicts L1 gas cost for the Commit/PublishProof/Execute operations. 
use zksync_types::{ - aggregated_operations::AggregatedActionType, block::BlockGasCount, - commitment::BlockWithMetadata, tx::ExecutionMetrics, ExecuteTransactionCommon, Transaction, + aggregated_operations::AggregatedActionType, + block::BlockGasCount, + commitment::BlockWithMetadata, + tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, + ExecuteTransactionCommon, Transaction, }; use self::constants::*; @@ -41,10 +44,14 @@ impl GasCost for Transaction { } } -pub fn additional_commit_cost(execution_metrics: &ExecutionMetrics) -> u32 { +fn additional_pubdata_commit_cost(execution_metrics: &ExecutionMetrics) -> u32 { (execution_metrics.size() as u32) * GAS_PER_BYTE } +fn additional_writes_commit_cost(writes_metrics: &DeduplicatedWritesMetrics) -> u32 { + (writes_metrics.size() as u32) * GAS_PER_BYTE +} + pub fn new_block_gas_count() -> BlockGasCount { BlockGasCount { commit: block_base_cost(AggregatedActionType::CommitBlocks), @@ -58,7 +65,7 @@ pub fn gas_count_from_tx_and_metrics( execution_metrics: &ExecutionMetrics, ) -> BlockGasCount { let commit = tx.base_cost(AggregatedActionType::CommitBlocks) - + additional_commit_cost(execution_metrics); + + additional_pubdata_commit_cost(execution_metrics); BlockGasCount { commit, prove: tx.base_cost(AggregatedActionType::PublishProofBlocksOnchain), @@ -68,7 +75,15 @@ pub fn gas_count_from_tx_and_metrics( pub fn gas_count_from_metrics(execution_metrics: &ExecutionMetrics) -> BlockGasCount { BlockGasCount { - commit: additional_commit_cost(execution_metrics), + commit: additional_pubdata_commit_cost(execution_metrics), + prove: 0, + execute: 0, + } +} + +pub fn gas_count_from_writes(writes_metrics: &DeduplicatedWritesMetrics) -> BlockGasCount { + BlockGasCount { + commit: additional_writes_commit_cost(writes_metrics), prove: 0, execute: 0, } diff --git a/core/bin/zksync_core/src/genesis.rs b/core/bin/zksync_core/src/genesis.rs index f181233ad775..bc0a1a6e266e 100644 --- 
a/core/bin/zksync_core/src/genesis.rs +++ b/core/bin/zksync_core/src/genesis.rs @@ -4,28 +4,31 @@ use tempfile::TempDir; use vm::zk_evm::aux_structures::{LogQuery, Timestamp}; -use zksync_types::system_contracts::get_system_smart_contracts; -use zksync_types::tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}; use zksync_types::{ + block::DeployedContract, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, - get_code_key, Address, L1BatchNumber, MiniblockNumber, StorageLog, StorageLogKind, - StorageLogQueryType, H256, + commitment::{BlockCommitment, BlockMetadata}, + get_code_key, get_system_context_init_logs, + system_contracts::get_system_smart_contracts, + tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + Address, L1BatchNumber, MiniblockNumber, StorageLog, StorageLogKind, H256, }; -use zksync_types::{get_system_context_init_logs, StorageLogQuery, FAIR_L2_GAS_PRICE}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, miniblock_hash}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash}; use zksync_config::ZkSyncConfig; +use zksync_contracts::BaseSystemContracts; use zksync_merkle_tree::ZkSyncTree; use zksync_dal::StorageProcessor; use zksync_storage::db::Database; use zksync_storage::RocksDB; -use zksync_types::block::DeployedContract; -use zksync_types::commitment::{BlockCommitment, BlockMetadata}; -use zksync_types::log_query_sorter::sort_storage_access_queries; -pub async fn ensure_genesis_state(storage: &mut StorageProcessor<'_>, config: ZkSyncConfig) { +pub async fn ensure_genesis_state( + storage: &mut StorageProcessor<'_>, + config: &ZkSyncConfig, +) -> H256 { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); let mut tree = ZkSyncTree::new(db); @@ -35,7 +38,13 @@ pub async fn ensure_genesis_state(storage: &mut 
StorageProcessor<'_>, config: Zk // return if genesis block was already processed if !transaction.blocks_dal().is_genesis_needed() { vlog::debug!("genesis is not needed!"); - return; + return transaction + .blocks_dal() + .get_storage_block(L1BatchNumber(0)) + .expect("genesis block is not found") + .hash + .map(|h| H256::from_slice(&h)) + .expect("genesis block hash is empty"); } vlog::info!("running regenesis"); @@ -43,7 +52,16 @@ pub async fn ensure_genesis_state(storage: &mut StorageProcessor<'_>, config: Zk let first_validator_address = config.eth_sender.sender.operator_commit_eth_addr; let chain_id = H256::from_low_u64_be(config.chain.eth.zksync_network_id as u64); - chain_schema_genesis(&mut transaction, first_validator_address, chain_id).await; + let base_system_contracts = BaseSystemContracts::load_from_disk(); + let base_system_contracts_hash = base_system_contracts.hashes(); + + chain_schema_genesis( + &mut transaction, + first_validator_address, + chain_id, + base_system_contracts, + ) + .await; vlog::info!("chain_schema_genesis is complete"); let storage_logs = @@ -58,6 +76,8 @@ pub async fn ensure_genesis_state(storage: &mut StorageProcessor<'_>, config: Zk genesis_root_hash, vec![], vec![], + config.chain.state_keeper.bootloader_hash, + config.chain.state_keeper.default_aa_hash, ); operations_schema_genesis( @@ -83,6 +103,36 @@ pub async fn ensure_genesis_state(storage: &mut StorageProcessor<'_>, config: Zk "CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX={}", rollup_last_leaf_index ); + println!( + "CHAIN_STATE_KEEPER_BOOTLOADER_HASH={:?}", + base_system_contracts_hash.bootloader + ); + println!( + "CHAIN_STATE_KEEPER_DEFAULT_AA_HASH={:?}", + base_system_contracts_hash.default_aa + ); + + genesis_root_hash +} + +// Default account and bootloader are not a regular system contracts +// they have never been actually deployed anywhere, +// They are the initial code that is fed into the VM upon its start. 
+// Both are rather parameters of a block and not system contracts. +// The code of the bootloader should not be deployed anywhere anywhere in the kernel space (i.e. addresses below 2^16) +// because in this case we will have to worry about protecting it. +fn insert_base_system_contracts_to_factory_deps( + storage: &mut StorageProcessor<'_>, + contracts: BaseSystemContracts, +) { + let factory_deps = vec![contracts.bootloader, contracts.default_aa] + .iter() + .map(|c| (c.hash, be_words_to_bytes(&c.code))) + .collect(); + + storage + .storage_dal() + .insert_factory_deps(MiniblockNumber(0), factory_deps); } async fn insert_system_contracts( @@ -116,7 +166,7 @@ async fn insert_system_contracts( // we don't produce proof for the genesis block, // but we still need to populate the table // to have the correct initial state of the merkle tree - let log_queries: Vec = storage_logs + let log_queries: Vec = storage_logs .iter() .enumerate() .flat_map(|(tx_index, (_, storage_logs))| { @@ -124,29 +174,22 @@ async fn insert_system_contracts( .iter() .enumerate() .map(move |(log_index, storage_log)| { - let log_type = match storage_log.kind { - StorageLogKind::Read => StorageLogQueryType::Read, - StorageLogKind::Write => StorageLogQueryType::InitialWrite, - }; - StorageLogQuery { - log_query: LogQuery { - // Monotonically increasing Timestamp. Normally it's generated by the VM, but we don't have a VM in the genesis block. - timestamp: Timestamp(((tx_index << 16) + log_index) as u32), - tx_number_in_block: tx_index as u16, - aux_byte: 0, - shard_id: 0, - address: *storage_log.key.address(), - key: h256_to_u256(*storage_log.key.key()), - read_value: h256_to_u256(H256::zero()), - written_value: h256_to_u256(storage_log.value), - rw_flag: storage_log.kind == StorageLogKind::Write, - rollback: false, - is_service: false, - }, - log_type, + LogQuery { + // Monotonically increasing Timestamp. Normally it's generated by the VM, but we don't have a VM in the genesis block. 
+ timestamp: Timestamp(((tx_index << 16) + log_index) as u32), + tx_number_in_block: tx_index as u16, + aux_byte: 0, + shard_id: 0, + address: *storage_log.key.address(), + key: h256_to_u256(*storage_log.key.key()), + read_value: h256_to_u256(H256::zero()), + written_value: h256_to_u256(storage_log.value), + rw_flag: storage_log.kind == StorageLogKind::Write, + rollback: false, + is_service: false, } }) - .collect::>() + .collect::>() }) .collect(); @@ -156,9 +199,9 @@ async fn insert_system_contracts( .storage_logs_dedup_dal() .insert_storage_logs(L1BatchNumber(0), &deduped_log_queries); - let (protective_reads, deduplicated_writes): (Vec<_>, Vec<_>) = deduped_log_queries + let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries .into_iter() - .partition(|log_query| log_query.log_type == StorageLogQueryType::Read); + .partition(|log_query| log_query.rw_flag); transaction .storage_logs_dedup_dal() .insert_protective_reads(L1BatchNumber(0), &protective_reads); @@ -183,8 +226,14 @@ pub(crate) async fn chain_schema_genesis<'a>( storage: &mut StorageProcessor<'_>, first_validator_address: Address, chain_id: H256, + base_system_contracts: BaseSystemContracts, ) { - let mut zero_block_header = L1BatchHeader::new(L1BatchNumber(0), 0, first_validator_address); + let mut zero_block_header = L1BatchHeader::new( + L1BatchNumber(0), + 0, + first_validator_address, + base_system_contracts.hashes(), + ); zero_block_header.is_finished = true; let zero_miniblock_header = MiniblockHeader { @@ -195,7 +244,8 @@ pub(crate) async fn chain_schema_genesis<'a>( l2_tx_count: 0, base_fee_per_gas: 0, l1_gas_price: 0, - l2_fair_gas_price: FAIR_L2_GAS_PRICE, + l2_fair_gas_price: 0, + base_system_contracts_hashes: base_system_contracts.hashes(), }; let mut transaction = storage.start_transaction().await; @@ -210,6 +260,8 @@ pub(crate) async fn chain_schema_genesis<'a>( .blocks_dal() .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(0)); + 
insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts); + let contracts = get_system_smart_contracts(); insert_system_contracts(&mut transaction, contracts, chain_id).await; diff --git a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs new file mode 100644 index 000000000000..dcd5c7d7ddf4 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -0,0 +1,70 @@ +use crate::house_keeper::periodic_job::PeriodicJob; +use zksync_dal::ConnectionPool; + +#[derive(Debug, Default)] +pub struct L1BatchMetricsReporter; + +impl L1BatchMetricsReporter { + fn report_metrics(&self, connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_storage_blocking(); + let mut block_metrics = vec![ + ( + conn.blocks_dal().get_sealed_block_number(), + "sealed".to_string(), + ), + ( + conn.blocks_dal().get_last_block_number_with_metadata(), + "metadata_calculated".to_string(), + ), + ( + conn.blocks_dal() + .get_last_l1_batch_number_with_witness_inputs(), + "merkle_proof_calculated".to_string(), + ), + ]; + + let eth_stats = conn.eth_sender_dal().get_eth_l1_batches(); + for (tx_type, l1_batch) in eth_stats.saved { + block_metrics.push((l1_batch, format!("l1_saved_{:?}", tx_type))) + } + + for (tx_type, l1_batch) in eth_stats.mined { + block_metrics.push((l1_batch, format!("l1_mined_{:?}", tx_type))) + } + + block_metrics.append( + &mut conn + .prover_dal() + .get_proven_l1_batches() + .into_iter() + .map(|(l1_batch_number, stage)| (l1_batch_number, format!("prove_{:?}", stage))) + .collect(), + ); + + block_metrics.append( + &mut conn + .witness_generator_dal() + .get_witness_generated_l1_batches() + .into_iter() + .map(|(l1_batch_number, stage)| (l1_batch_number, format!("wit_gen_{:?}", stage))) + .collect(), + ); + + for (l1_batch_number, stage) in block_metrics { + metrics::gauge!( + "server.block_number", + l1_batch_number.0 as f64, + 
"stage" => stage + ); + } + } +} + +impl PeriodicJob for L1BatchMetricsReporter { + const SERVICE_NAME: &'static str = "L1BatchMetricsReporter"; + const POLLING_INTERVAL_MS: u64 = 10000; + + fn run_routine_task(&mut self, connection_pool: ConnectionPool) { + self.report_metrics(connection_pool); + } +} diff --git a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs index a71e0da76386..24ccfcb8fd1f 100644 --- a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs +++ b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs @@ -1,11 +1,6 @@ -use std::error; - use zksync_dal::ConnectionPool; -use zksync_object_store::cloud_storage::Reason; -use zksync_object_store::gcs_object_store::cloud_storage::Error; -use zksync_object_store::gcs_object_store::GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE; use zksync_object_store::object_store::{ - DynamicObjectStore, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, + DynamicObjectStore, ObjectStoreError, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, PROVER_JOBS_BUCKET_PATH, SCHEDULER_WITNESS_JOBS_BUCKET_PATH, WITNESS_INPUT_BUCKET_PATH, }; @@ -19,36 +14,20 @@ pub struct GcsBlobCleaner { const BATCH_CLEANUP_SIZE: u8 = 5; -fn handle_remove_result(object_store_type: &str, result: Result<(), Box>) { - if object_store_type == GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE { - match result { - Ok(_) => {} // DO NOTHING - Err(err) => { - let gcs_error = err.downcast::().unwrap(); - match *gcs_error { - Error::Google(err) => { - if err - .error - .errors - .iter() - .any(|err| matches!(err.reason, Reason::NotFound)) - { - return; - } - panic!("{:?}", err) - } - _ => { - panic!("{:?}", gcs_error) - } - } +fn handle_remove_result(result: Result<(), ObjectStoreError>) { + if let Err(error) = result { + match error { + // There can be scenario when the removal from the GCS succeeded and updating the DB after that fails, + // in this scenario the 
retry of removal from GCS would fail as the object is already removed. + // Hence we ignore the KeyNotFound error below + ObjectStoreError::KeyNotFound(_) => {} + ObjectStoreError::Other(err) => { + panic!("{:?}", err) } } } } -/// There can be scenario when the removal from the GCS succeeded and updating the DB after that fails, -/// in this scenario the retry of removal from GCS would fail as the object is already removed. -/// To handle this either the `Key does not exist` error from GCS can be ignored or other option is to do everything inside a transaction. impl GcsBlobCleaner { fn cleanup_blobs(&mut self, pool: ConnectionPool) { self.cleanup_prover_jobs_blobs(pool.clone()); @@ -66,13 +45,12 @@ impl GcsBlobCleaner { let (ids, circuit_input_blob_urls): (Vec<_>, Vec<_>) = id_blob_urls_tuple.into_iter().unzip(); - vlog::info!("Found {} provers jobs for cleaning blobs", ids.len()); + if !ids.is_empty() { + vlog::info!("Found {} provers jobs for cleaning blobs", ids.len()); + } circuit_input_blob_urls.into_iter().for_each(|url| { - handle_remove_result( - self.object_store.get_store_type(), - self.object_store.remove(PROVER_JOBS_BUCKET_PATH, url), - ); + handle_remove_result(self.object_store.remove(PROVER_JOBS_BUCKET_PATH, url)); }); conn.prover_dal().mark_gcs_blobs_as_cleaned(ids); @@ -86,16 +64,15 @@ impl GcsBlobCleaner { let (l1_batch_numbers, merkle_tree_paths_blob_urls): (Vec<_>, Vec<_>) = l1_batches_blob_urls_tuple.into_iter().unzip(); - vlog::info!( - "Found {} witness inputs for cleaning blobs", - l1_batch_numbers.len() - ); + if !l1_batch_numbers.is_empty() { + vlog::info!( + "Found {} witness inputs for cleaning blobs", + l1_batch_numbers.len() + ); + } merkle_tree_paths_blob_urls.into_iter().for_each(|url| { - handle_remove_result( - self.object_store.get_store_type(), - self.object_store.remove(WITNESS_INPUT_BUCKET_PATH, url), - ); + handle_remove_result(self.object_store.remove(WITNESS_INPUT_BUCKET_PATH, url)); }); conn.blocks_dal() 
.mark_gcs_blobs_as_cleaned(l1_batch_numbers); @@ -110,21 +87,21 @@ impl GcsBlobCleaner { let (l1_batch_numbers, basic_circuit_and_circuit_inputs_blob_urls): (Vec<_>, Vec<_>) = l1_batches_blob_urls_tuple.into_iter().unzip(); - vlog::info!( - "Found {} leaf aggregation witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); + if !l1_batch_numbers.is_empty() { + vlog::info!( + "Found {} leaf aggregation witness jobs for cleaning blobs", + l1_batch_numbers.len() + ); + } basic_circuit_and_circuit_inputs_blob_urls .into_iter() .for_each(|url_pair| { handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.0), ); handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.1), ); @@ -147,21 +124,21 @@ impl GcsBlobCleaner { Vec<_>, ) = l1_batches_blob_urls_tuple.into_iter().unzip(); - vlog::info!( - "Found {} node aggregation witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); + if !l1_batch_numbers.is_empty() { + vlog::info!( + "Found {} node aggregation witness jobs for cleaning blobs", + l1_batch_numbers.len() + ); + } leaf_layer_subqueues_and_aggregation_outputs_blob_urls .into_iter() .for_each(|url_pair| { handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.0), ); handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, url_pair.1), ); @@ -183,21 +160,21 @@ impl GcsBlobCleaner { Vec<_>, ) = l1_batches_blob_urls_tuple.into_iter().unzip(); - vlog::info!( - "Found {} scheduler witness jobs for cleaning blobs", - l1_batch_numbers.len() - ); + if !l1_batch_numbers.is_empty() { + vlog::info!( + "Found {} scheduler witness jobs for cleaning blobs", + l1_batch_numbers.len() + ); + } scheduler_witness_and_node_aggregations_blob_urls .into_iter() 
.for_each(|url_pair| { handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(SCHEDULER_WITNESS_JOBS_BUCKET_PATH, url_pair.0), ); handle_remove_result( - self.object_store.get_store_type(), self.object_store .remove(SCHEDULER_WITNESS_JOBS_BUCKET_PATH, url_pair.1), ); diff --git a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index 17b09d56f919..c7284e986cee 100644 --- a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -15,10 +15,13 @@ impl PeriodicJob for GpuProverQueueMonitor { .access_storage_blocking() .gpu_prover_queue_dal() .get_count_of_jobs_ready_for_processing(); - vlog::info!( - "Found {} free circuit synthesizer jobs", - free_prover_instance_count - ); + + if free_prover_instance_count > 0 { + vlog::info!( + "Found {} free circuit synthesizer jobs", + free_prover_instance_count + ); + } metrics::gauge!( "server.circuit_synthesizer.jobs", diff --git a/core/bin/zksync_core/src/house_keeper/mod.rs b/core/bin/zksync_core/src/house_keeper/mod.rs index cbf6ed875184..54c708f9f3ab 100644 --- a/core/bin/zksync_core/src/house_keeper/mod.rs +++ b/core/bin/zksync_core/src/house_keeper/mod.rs @@ -1,5 +1,7 @@ +pub mod blocks_state_reporter; pub mod gcs_blob_cleaner; pub mod gpu_prover_queue_monitor; pub mod periodic_job; +pub mod prover_queue_monitor; pub mod witness_generator_misc_reporter; pub mod witness_generator_queue_monitor; diff --git a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs new file mode 100644 index 000000000000..a8729957faea --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs @@ -0,0 +1,56 @@ +use std::collections::HashMap; + +use zksync_config::configs::ProverGroupConfig; +use zksync_dal::ConnectionPool; +use 
zksync_prover_utils::circuit_name_to_numeric_index; +use zksync_types::proofs::JobCountStatistics; + +use crate::house_keeper::periodic_job::PeriodicJob; + +const PROVER_SERVICE_NAME: &str = "prover"; + +#[derive(Debug, Default)] +pub struct ProverStatsReporter {} + +impl ProverStatsReporter { + fn get_job_statistics(connection_pool: ConnectionPool) -> HashMap { + let mut conn = connection_pool.access_storage_blocking(); + conn.prover_dal().get_prover_jobs_stats_per_circuit() + } +} + +/// Invoked periodically to push job statistics to Prometheus +/// Note: these values will be used for manually scaling provers. +impl PeriodicJob for ProverStatsReporter { + const SERVICE_NAME: &'static str = "ProverStatsReporter"; + + fn run_routine_task(&mut self, connection_pool: ConnectionPool) { + let prover_group_config = ProverGroupConfig::from_env(); + let stats = Self::get_job_statistics(connection_pool); + let prover_group_to_stats: HashMap = stats + .into_iter() + .map(|(key, value)| { + ( + prover_group_config + .get_group_id_for_circuit_id(circuit_name_to_numeric_index(&key).unwrap()) + .unwrap(), + value, + ) + }) + .collect(); + for (group_id, stats) in prover_group_to_stats.into_iter() { + metrics::gauge!( + format!("server.{}.jobs", PROVER_SERVICE_NAME), + stats.queued as f64, + "type" => "queued", + "prover_group_id" => group_id.to_string(), + ); + + metrics::gauge!( + format!("server.{}.jobs", PROVER_SERVICE_NAME), + stats.in_progress as f64, + "type" => "in_progress", "prover_group_id" => group_id.to_string(), + ); + } + } +} diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs index 364d56fb9b72..ee01b66e4c57 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_misc_reporter.rs @@ -1,8 +1,5 @@ use crate::house_keeper::periodic_job::PeriodicJob; -use 
zksync_config::configs::{ - prover::ProverConfig, - witness_generator::{SamplingMode, WitnessGeneratorConfig}, -}; +use zksync_config::configs::{prover::ProverConfig, witness_generator::WitnessGeneratorConfig}; use zksync_dal::ConnectionPool; #[derive(Debug)] @@ -21,17 +18,6 @@ impl WitnessGeneratorMetricsReporter { .unwrap_or(last_sealed_l1_batch_number); let prover_lag = last_sealed_l1_batch_number.0 - min_unproved_l1_batch_number.0; metrics::gauge!("server.prover.lag", prover_lag as f64); - - if let SamplingMode::Enabled(sampling_params) = - self.witness_generator_config.sampling_mode() - { - let sampling_probability = - sampling_params.calculate_sampling_probability(prover_lag as usize); - metrics::gauge!( - "server.witness_generator.sampling_probability", - sampling_probability - ); - } } } diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index 047306a10d86..268d6d6a13d7 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -33,7 +33,9 @@ impl PeriodicJob for WitnessGeneratorStatsReporter { fn run_routine_task(&mut self, connection_pool: ConnectionPool) { let stats = Self::get_job_statistics(connection_pool); - vlog::info!("Found {} free witness generators jobs", stats.queued); + if stats.queued > 0 { + vlog::info!("Found {} free witness generators jobs", stats.queued); + } metrics::gauge!( format!("server.{}.jobs", WITNESS_GENERATOR_SERVICE_NAME), diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index 7296bdc6eeff..cb3938a49b87 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -13,9 +13,8 @@ use zksync_config::configs::WitnessGeneratorConfig; use house_keeper::periodic_job::PeriodicJob; use prometheus_exporter::run_prometheus_exporter; use zksync_circuit_breaker::{ - 
code_hashes::CodeHashesChecker, facet_selectors::FacetSelectorsChecker, - l1_txs::FailedL1TransactionChecker, vks::VksChecker, CircuitBreaker, CircuitBreakerChecker, - CircuitBreakerError, + facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, vks::VksChecker, + CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, }; use zksync_config::ZkSyncConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; @@ -27,14 +26,17 @@ use zksync_queued_job_processor::JobProcessor; use crate::eth_sender::{Aggregator, EthTxManager}; use crate::fee_monitor::FeeMonitor; +use crate::house_keeper::blocks_state_reporter::L1BatchMetricsReporter; use crate::house_keeper::gcs_blob_cleaner::GcsBlobCleaner; use crate::house_keeper::gpu_prover_queue_monitor::GpuProverQueueMonitor; use crate::house_keeper::{ + prover_queue_monitor::ProverStatsReporter, witness_generator_misc_reporter::WitnessGeneratorMetricsReporter, witness_generator_queue_monitor::WitnessGeneratorStatsReporter, }; use crate::metadata_calculator::{MetadataCalculator, MetadataCalculatorMode}; -use crate::state_keeper::{MempoolFetcher, MempoolGuard}; +use crate::state_keeper::mempool_actor::MempoolFetcher; +use crate::state_keeper::MempoolGuard; use crate::witness_generator::WitnessGenerator; use crate::{ api_server::{explorer, web3}, @@ -57,6 +59,7 @@ pub mod genesis; pub mod house_keeper; pub mod metadata_calculator; pub mod state_keeper; +pub mod sync_layer; pub mod witness_generator; /// Waits for *any* of the tokio tasks to be finished. @@ -86,7 +89,7 @@ pub async fn wait_for_tasks(task_futures: Vec>, tasks_allowed_to_ /// Inserts the initial information about zkSync tokens into the database. 
pub async fn genesis_init(config: ZkSyncConfig) { let mut storage = StorageProcessor::establish_connection(true).await; - genesis::ensure_genesis_state(&mut storage, config).await; + genesis::ensure_genesis_state(&mut storage, &config).await; } #[derive(Clone, Debug, PartialEq)] @@ -273,6 +276,7 @@ pub async fn initialize_components( mempool_fetcher_pool, config.chain.mempool.remove_stuck_txs, config.chain.mempool.stuck_tx_timeout(), + config.chain.state_keeper.fair_l2_gas_price, stop_receiver.clone(), ))); @@ -315,12 +319,14 @@ pub async fn initialize_components( let eth_tx_aggregator_actor = EthTxAggregator::new( config.eth_sender.sender.clone(), Aggregator::new(config.eth_sender.sender.clone()), - config.contracts.diamond_proxy_addr, + config.contracts.validator_timelock_addr, nonce.as_u64(), ); - task_futures.push(tokio::spawn( - eth_tx_aggregator_actor.run(eth_sender_storage.clone(), stop_receiver.clone()), - )); + task_futures.push(tokio::spawn(eth_tx_aggregator_actor.run( + eth_sender_storage.clone(), + eth_gateway.clone(), + stop_receiver.clone(), + ))); vlog::info!("initialized ETH-TxAggregator in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed().as_secs() as f64, "stage" => "eth_tx_aggregator"); } @@ -432,6 +438,8 @@ pub async fn initialize_components( tokio::spawn(witness_generator_misc_reporter.run(ConnectionPool::new(Some(1), true))), tokio::spawn(GpuProverQueueMonitor::default().run(ConnectionPool::new(Some(1), true))), tokio::spawn(gcs_blob_cleaner.run(ConnectionPool::new(Some(1), true))), + tokio::spawn(L1BatchMetricsReporter::default().run(ConnectionPool::new(Some(1), true))), + tokio::spawn(ProverStatsReporter::default().run(ConnectionPool::new(Some(1), true))), ]; task_futures.extend(witness_generator_metrics); @@ -512,19 +520,6 @@ fn circuit_breakers_for_components( })); } - if components.iter().any(|c| { - matches!( - c, - Component::EthTxAggregator - | Component::EthTxManager - | Component::StateKeeper - 
| Component::Tree - | Component::TreeBackup - ) - }) { - circuit_breakers.push(Box::new(CodeHashesChecker::new(config))); - } - if components.iter().any(|c| { matches!( c, diff --git a/core/bin/zksync_core/src/metadata_calculator/mod.rs b/core/bin/zksync_core/src/metadata_calculator/mod.rs index 2acd9b94e83f..2a0c5013426e 100644 --- a/core/bin/zksync_core/src/metadata_calculator/mod.rs +++ b/core/bin/zksync_core/src/metadata_calculator/mod.rs @@ -442,6 +442,8 @@ impl MetadataCalculator { merkle_root_hash, tree_metadata_at_block.initial_writes.clone(), tree_metadata_at_block.repeated_writes.clone(), + l1_batch_header.base_system_contracts_hashes.bootloader, + l1_batch_header.base_system_contracts_hashes.default_aa, ); let block_commitment_hash = block_commitment.hash(); vlog::trace!("Block commitment {:?}", &block_commitment); diff --git a/core/bin/zksync_core/src/metadata_calculator/tests.rs b/core/bin/zksync_core/src/metadata_calculator/tests.rs index 410992d15025..8294cc1efdec 100644 --- a/core/bin/zksync_core/src/metadata_calculator/tests.rs +++ b/core/bin/zksync_core/src/metadata_calculator/tests.rs @@ -5,13 +5,13 @@ use std::str::FromStr; use db_test_macro::db_test; use tempfile::TempDir; use tokio::sync::watch; -use zksync_types::FAIR_L2_GAS_PRICE; use crate::genesis::{chain_schema_genesis, operations_schema_genesis}; use crate::metadata_calculator::MetadataCalculator; use crate::MetadataCalculatorMode; use zksync_config::ZkSyncConfig; +use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; use zksync_merkle_tree::ZkSyncTree; use zksync_storage::db::Database; @@ -158,9 +158,18 @@ async fn setup_metadata_calculator_with_options( if storage.blocks_dal().is_genesis_needed() { let chain_id = H256::from_low_u64_be(config.chain.eth.zksync_network_id as u64); - chain_schema_genesis(&mut storage, fee_address, chain_id).await; - let block_commitment = BlockCommitment::new(vec![], 0, Default::default(), vec![], vec![]); + let base_system_contracts = 
BaseSystemContracts::load_from_disk(); + let block_commitment = BlockCommitment::new( + vec![], + 0, + Default::default(), + vec![], + vec![], + base_system_contracts.bootloader.hash, + base_system_contracts.default_aa.hash, + ); + chain_schema_genesis(&mut storage, fee_address, chain_id, base_system_contracts).await; operations_schema_genesis( &mut storage, &block_commitment, @@ -180,11 +189,18 @@ async fn reset_db_state(pool: ConnectionPool, num_blocks: usize) { storage.blocks_dal().delete_miniblocks(MiniblockNumber(0)); storage.blocks_dal().delete_l1_batches(L1BatchNumber(0)); + let base_system_contracts = BaseSystemContracts::load_from_disk(); let all_logs = gen_storage_logs(num_blocks); for (block_number, block_logs) in (1..=(num_blocks as u32)).zip(all_logs) { - let mut header = L1BatchHeader::mock(L1BatchNumber(block_number)); + let mut header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Address::default(), + base_system_contracts.hashes(), + ); header.is_finished = true; // Assumes that L1 batch consists of only one miniblock. 
+ let miniblock_header = MiniblockHeader { number: MiniblockNumber(block_number), timestamp: header.timestamp, @@ -193,7 +209,8 @@ async fn reset_db_state(pool: ConnectionPool, num_blocks: usize) { l2_tx_count: header.l2_tx_count, base_fee_per_gas: header.base_fee_per_gas, l1_gas_price: 0, - l2_fair_gas_price: FAIR_L2_GAS_PRICE, + l2_fair_gas_price: 0, + base_system_contracts_hashes: base_system_contracts.hashes(), }; storage diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs index a5390b52c063..169eb60b7aaf 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -4,18 +4,19 @@ use vm::{ storage::Storage, vm::{VmPartialExecutionResult, VmTxExecutionResult}, vm_with_bootloader::{ - init_vm, push_transaction_to_bootloader_memory, BlockContextMode, BootloaderJobType, + init_vm, init_vm_with_gas_limit, push_transaction_to_bootloader_memory, BootloaderJobType, TxExecutionMode, }, - zk_evm::block_properties::BlockProperties, TxRevertReason, VmBlockResult, VmInstance, }; use zksync_dal::ConnectionPool; use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; use zksync_storage::{db::Database, RocksDB}; use zksync_types::{tx::ExecutionMetrics, Transaction, U256}; +use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; use crate::gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}; +use crate::state_keeper::io::L1BatchParams; use crate::state_keeper::types::ExecutionMetricsForCriteria; @@ -37,30 +38,40 @@ pub(crate) struct TxExecutionResult { /// Execution metrics for the bootloader tip dry run. /// Will be `None` if either the transaction was rejected or if bootloader tip dry run failed. 
pub(super) bootloader_dry_run_metrics: Option, + /// Bytecodes that are to be published as compressed with this transaction + pub(super) compressed_bytecodes: Vec, } impl TxExecutionResult { - pub(super) fn new(tx_result: Result) -> Self { + pub(crate) fn new( + tx_result: Result<(VmTxExecutionResult, Vec), TxRevertReason>, + ) -> Self { + let (tx_result, compressed_bytecodes) = match tx_result { + Ok((result, compressed_bytecodes)) => (Ok(result), compressed_bytecodes), + Err(err) => (Err(err), vec![]), + }; + Self { tx_result, bootloader_dry_run_result: None, tx_metrics: None, bootloader_dry_run_metrics: None, + compressed_bytecodes, } } - pub(super) fn add_tx_metrics(&mut self, tx_metrics: ExecutionMetricsForCriteria) { + pub(crate) fn add_tx_metrics(&mut self, tx_metrics: ExecutionMetricsForCriteria) { self.tx_metrics = Some(tx_metrics); } - pub(super) fn add_bootloader_result( + pub(crate) fn add_bootloader_result( &mut self, bootloader_dry_run_result: Result, ) { self.bootloader_dry_run_result = Some(bootloader_dry_run_result); } - pub(super) fn add_bootloader_metrics( + pub(crate) fn add_bootloader_metrics( &mut self, bootloader_dry_run_metrics: ExecutionMetricsForCriteria, ) { @@ -94,12 +105,8 @@ impl TxExecutionResult { /// An abstraction that allows us to create different kinds of batch executors. /// The only requirement is to return the `BatchExecutorHandle` object, which does its work /// by communicating with the externally initialized thread. -pub(crate) trait L1BatchExecutorBuilder: 'static + std::fmt::Debug + Send { - fn init_batch( - &self, - block_context: BlockContextMode, - block_properties: BlockProperties, - ) -> BatchExecutorHandle; +pub trait L1BatchExecutorBuilder: 'static + std::fmt::Debug + Send { + fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle; } /// The default implementation of the `BatchExecutorBuilder`. 
@@ -110,6 +117,7 @@ pub(crate) struct MainBatchExecutorBuilder { pool: ConnectionPool, reexecute_each_tx: bool, max_allowed_tx_gas_limit: U256, + validation_computational_gas_limit: u32, } impl MainBatchExecutorBuilder { @@ -118,22 +126,20 @@ impl MainBatchExecutorBuilder { pool: ConnectionPool, reexecute_each_tx: bool, max_allowed_tx_gas_limit: U256, + validation_computational_gas_limit: u32, ) -> Self { Self { state_keeper_db_path, pool, reexecute_each_tx, max_allowed_tx_gas_limit, + validation_computational_gas_limit, } } } impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { - fn init_batch( - &self, - block_context: BlockContextMode, - block_properties: BlockProperties, - ) -> BatchExecutorHandle { + fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle { let secondary_storage = self .pool .access_storage_blocking() @@ -145,7 +151,11 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { )); vlog::info!( "Secondary storage for batch {} initialized, size is {}", - block_context.inner_block_context().context.block_number, + l1_batch_params + .context_mode + .inner_block_context() + .context + .block_number, secondary_storage.get_estimated_map_size() ); metrics::gauge!( @@ -155,9 +165,10 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { BatchExecutorHandle::new( self.reexecute_each_tx, self.max_allowed_tx_gas_limit, - block_context, - block_properties, + self.validation_computational_gas_limit, secondary_storage, + l1_batch_params, + None, ) } } @@ -166,7 +177,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { /// `BatchExecutorHandle` is stored in the state keeper and is used to invoke or rollback transactions, and also seal /// the batches. 
#[derive(Debug)] -pub(crate) struct BatchExecutorHandle { +pub struct BatchExecutorHandle { handle: thread::JoinHandle<()>, commands: mpsc::Sender, } @@ -175,19 +186,21 @@ impl BatchExecutorHandle { pub(super) fn new( reexecute_each_tx: bool, max_allowed_tx_gas_limit: U256, - block_context: BlockContextMode, - block_properties: BlockProperties, + validation_computational_gas_limit: u32, secondary_storage: SecondaryStateStorage, + l1_batch_params: L1BatchParams, + vm_gas_limit: Option, ) -> Self { let (commands_sender, commands_receiver) = mpsc::channel(); let executor = BatchExecutor { reexecute_each_tx, max_allowed_tx_gas_limit, + validation_computational_gas_limit, commands: commands_receiver, + vm_gas_limit, }; - let handle = - thread::spawn(move || executor.run(secondary_storage, block_context, block_properties)); + let handle = thread::spawn(move || executor.run(secondary_storage, l1_batch_params)); Self { handle, @@ -197,8 +210,7 @@ impl BatchExecutorHandle { /// Creates a batch executor handle from the provided sender and thread join handle. /// Can be used to inject an alternative batch executor implementation. - #[cfg(test)] - pub(super) fn from_raw( + pub(crate) fn from_raw( handle: thread::JoinHandle<()>, commands: mpsc::Sender, ) -> Self { @@ -243,7 +255,7 @@ impl BatchExecutorHandle { } /// Implementation of the "primary" (non-test) batch executor. -/// Upon launch, it initialized the VM object with provided block context and properties, and keeps applying +/// Upon launch, it initializes the VM object with provided block context and properties, and keeps applying /// transactions until the batch is sealed. 
/// /// One `BatchExecutor` can execute exactly one batch, so once the batch is sealed, a new `BatchExecutor` object must @@ -252,11 +264,13 @@ impl BatchExecutorHandle { pub(super) struct BatchExecutor { reexecute_each_tx: bool, max_allowed_tx_gas_limit: U256, + validation_computational_gas_limit: u32, commands: mpsc::Receiver, + vm_gas_limit: Option, } #[allow(clippy::large_enum_variant)] -pub(super) enum Command { +pub(crate) enum Command { ExecuteTx(Transaction, mpsc::SyncSender), RollbackLastTx(mpsc::SyncSender<()>), FinishBatch(mpsc::SyncSender), @@ -266,23 +280,37 @@ impl BatchExecutor { pub(super) fn run( self, secondary_storage: SecondaryStateStorage, - block_context: BlockContextMode, - block_properties: BlockProperties, + l1_batch_params: L1BatchParams, ) { vlog::info!( "Starting executing batch #{}", - block_context.inner_block_context().context.block_number + l1_batch_params + .context_mode + .inner_block_context() + .context + .block_number ); let mut storage_view = StorageView::new(&secondary_storage); let mut oracle_tools = vm::OracleTools::new(&mut storage_view as &mut dyn Storage); - let mut vm = init_vm( - &mut oracle_tools, - block_context, - &block_properties, - TxExecutionMode::VerifyExecute, - ); + let mut vm = match self.vm_gas_limit { + Some(vm_gas_limit) => init_vm_with_gas_limit( + &mut oracle_tools, + l1_batch_params.context_mode, + &l1_batch_params.properties, + TxExecutionMode::VerifyExecute, + &l1_batch_params.base_system_contracts, + vm_gas_limit, + ), + None => init_vm( + &mut oracle_tools, + l1_batch_params.context_mode, + &l1_batch_params.properties, + TxExecutionMode::VerifyExecute, + &l1_batch_params.base_system_contracts, + ), + }; while let Ok(cmd) = self.commands.recv() { match cmd { @@ -306,7 +334,6 @@ impl BatchExecutor { fn execute_tx(&self, tx: &Transaction, vm: &mut VmInstance) -> TxExecutionResult { let gas_consumed_before_tx = vm.gas_consumed(); - let updated_storage_slots_before_tx = 
vm.number_of_updated_storage_slots(); // Save pre-`execute_next_tx` VM snapshot. vm.save_current_vm_as_snapshot(); @@ -325,6 +352,7 @@ impl BatchExecutor { bootloader_dry_run_result: None, tx_metrics: None, bootloader_dry_run_metrics: None, + compressed_bytecodes: vec![], }; } @@ -358,9 +386,8 @@ impl BatchExecutor { let tx_metrics = Self::get_execution_metrics( vm, Some(tx), - &tx_result.as_ref().unwrap().result, + &tx_result.as_ref().unwrap().0.result, gas_consumed_before_tx, - updated_storage_slots_before_tx, ); result.add_tx_metrics(tx_metrics); @@ -400,20 +427,90 @@ impl BatchExecutor { &self, tx: &Transaction, vm: &mut VmInstance, - ) -> Result { - push_transaction_to_bootloader_memory(vm, tx, TxExecutionMode::VerifyExecute); - vm.execute_next_tx() + ) -> Result<(VmTxExecutionResult, Vec), TxRevertReason> { + // Note, that the space where we can put the calldata for compressing transactions + // is limited and the transactions do not pay for taking it. + // In order to not let the accounts spam the space of compressed bytecodes with bytecodes + // that will not be published (e.g. due to out of gas), we use the following scheme: + // We try to execute the transaction with compressed bytecodes. + // If it fails and the compressed bytecodes have not been published, + // it means that there is no sense in pollutting the space of compressed bytecodes, + // and so we reeexecute the transaction, but without compressions. 
+ + // Saving the snapshot before executing + vm.save_current_vm_as_snapshot(); + + let compressed_bytecodes = if tx.is_l1() || tx.execute.factory_deps.is_none() { + // For L1 transactions there are no compressed bytecodes + vec![] + } else { + tx.execute + .factory_deps + .as_ref() + .unwrap() + .iter() + .filter(|dep| { + !vm.state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(dep)) + }) + .filter_map(|dep| CompressedBytecodeInfo::from_original(dep.clone()).ok()) + .collect() + }; + + push_transaction_to_bootloader_memory( + vm, + tx, + TxExecutionMode::VerifyExecute, + Some(compressed_bytecodes.clone()), + ); + let result_with_compression = + vm.execute_next_tx(self.validation_computational_gas_limit)?; + + let at_least_one_unpublished = compressed_bytecodes.iter().any(|info| { + !vm.state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }); + + if at_least_one_unpublished { + // Rollbacking and now trying to execute one more time. + vm.rollback_to_latest_snapshot_popping(); + push_transaction_to_bootloader_memory( + vm, + tx, + TxExecutionMode::VerifyExecute, + Some(vec![]), + ); + + vm.execute_next_tx(self.validation_computational_gas_limit) + .map(|val| (val, vec![])) + } else { + // Remove the snapshot taken at the start of this function as it is not needed anymore. + vm.pop_snapshot_no_rollback(); + + Ok((result_with_compression, compressed_bytecodes)) + } } fn reexecute_tx_in_vm( &self, vm: &mut VmInstance<'_>, tx: &Transaction, - expected_tx_result: Result, + expected_tx_result: Result< + (VmTxExecutionResult, Vec), + TxRevertReason, + >, ) { // Rollback to the pre-`execute_next_tx` VM snapshot. // `rollback_to_latest_snapshot` (not `rollback_to_latest_snapshot_popping`) is used here because - // we will need this snapshot again if seal criteria will result in `ExcludeAndSead`. 
+ // we will need this snapshot again if seal criteria will result in `ExcludeAndSeal`. vm.rollback_to_latest_snapshot(); let alternative_result = self.execute_tx_in_vm(tx, vm); assert_eq!( @@ -430,20 +527,14 @@ impl BatchExecutor { ) -> Result<(VmPartialExecutionResult, ExecutionMetricsForCriteria), TxRevertReason> { let stage_started_at = Instant::now(); let gas_consumed_before = vm.gas_consumed(); - let updated_storage_slots_before = vm.number_of_updated_storage_slots(); // Save pre-`execute_till_block_end` VM snapshot. vm.save_current_vm_as_snapshot(); let block_tip_result = vm.execute_block_tip(); let result = match &block_tip_result.revert_reason { None => { - let metrics = Self::get_execution_metrics( - vm, - None, - &block_tip_result, - gas_consumed_before, - updated_storage_slots_before, - ); + let metrics = + Self::get_execution_metrics(vm, None, &block_tip_result, gas_consumed_before); Ok((block_tip_result, metrics)) } Some(TxRevertReason::BootloaderOutOfGas) => Err(TxRevertReason::BootloaderOutOfGas), @@ -469,10 +560,7 @@ impl BatchExecutor { tx: Option<&Transaction>, execution_result: &VmPartialExecutionResult, gas_consumed_before: u32, - updated_storage_slots_before: usize, ) -> ExecutionMetricsForCriteria { - let storage_updates = vm.number_of_updated_storage_slots() - updated_storage_slots_before; - let gas_consumed_after = vm.gas_consumed(); assert!( gas_consumed_after >= gas_consumed_before, @@ -503,7 +591,6 @@ impl BatchExecutor { }; ExecutionMetricsForCriteria { - storage_updates, l1_gas, execution_metrics, } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index c056830b4513..0901eef91b4e 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -3,14 +3,14 @@ use crate::state_keeper::batch_executor::tests::tester::TestConfig; use self::tester::{Account, 
Tester}; use assert_matches::assert_matches; use db_test_macro::db_test; -use vm::{utils::BLOCK_GAS_LIMIT, TxRevertReason}; -use zksync_types::PriorityOpId; +use vm::TxRevertReason; +use zksync_types::{tx::tx_execution_info::TxExecutionStatus, PriorityOpId}; mod tester; use super::TxExecutionResult; -/// Ensures that transaction was executed successfully. +/// Ensures that the transaction was executed successfully. fn assert_executed(execution_result: TxExecutionResult) { assert_matches!(execution_result.tx_result, Ok(_)); assert_matches!(execution_result.bootloader_dry_run_result, Some(Ok(_))); @@ -18,7 +18,7 @@ fn assert_executed(execution_result: TxExecutionResult) { assert_matches!(execution_result.bootloader_dry_run_metrics, Some(_)); } -/// Ensures that transaction was rejected. +/// Ensures that the transaction was rejected. fn assert_rejected(execution_result: TxExecutionResult) { assert_matches!(execution_result.tx_result, Err(_)); assert_matches!(execution_result.bootloader_dry_run_result, None); @@ -26,6 +26,15 @@ fn assert_rejected(execution_result: TxExecutionResult) { assert_matches!(execution_result.bootloader_dry_run_metrics, None); } +/// Ensures that the transaction was executed successfully but reverted by the VM. +fn assert_reverted(execution_result: TxExecutionResult) { + assert_executed(execution_result.clone()); + assert_matches!( + execution_result.tx_result.unwrap().status, + TxExecutionStatus::Failure + ); +} + /// Checks that we can successfully execute a single L2 tx in batch executor. #[db_test] async fn execute_l2_tx(connection_pool: ConnectionPool) { @@ -56,9 +65,27 @@ async fn execute_l1_tx(connection_pool: ConnectionPool) { executor.finish_batch(); } +/// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor. 
+#[db_test] +async fn execute_l2_and_l1_txs(connection_pool: ConnectionPool) { + let mut alice = Account::random(); + + let tester = Tester::new(connection_pool); + tester.genesis().await; + tester.fund(&[alice.address()]); + let executor = tester.create_batch_executor(); + + let res = executor.execute_tx(alice.execute()); + assert_executed(res); + + let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))); + assert_executed(res); + + executor.finish_batch(); +} + /// Checks that we can successfully rollback the transaction and execute it once again. #[db_test] -#[ignore] async fn rollback(connection_pool: ConnectionPool) { let mut alice = Account::random(); @@ -81,6 +108,7 @@ async fn rollback(connection_pool: ConnectionPool) { res_old.tx_metrics, res_new.tx_metrics, "Execution results must be the same" ); + executor.finish_batch(); } @@ -99,7 +127,7 @@ async fn reject_tx(connection_pool: ConnectionPool) { executor.finish_batch(); } -/// Checks that we tx with too big gas limit is correctly rejected. +/// Checks that tx with too big gas limit is correctly rejected. #[db_test] async fn too_big_gas_limit(connection_pool: ConnectionPool) { let mut alice = Account::random(); @@ -140,6 +168,7 @@ async fn tx_cant_be_reexecuted(connection_pool: ConnectionPool) { let tx = alice.execute(); let res1 = executor.execute_tx(tx.clone()); assert_executed(res1); + // Nonce is used for the second tx. let res2 = executor.execute_tx(tx); assert_rejected(res2); @@ -163,44 +192,145 @@ async fn deploy_and_call_loadtest(connection_pool: ConnectionPool) { 10, 10000000, ))); - assert_executed(executor.execute_tx(alice.loadnext_custom_writes_call(loadtest_address, 1))); + assert_executed(executor.execute_tx(alice.loadnext_custom_writes_call( + loadtest_address, + 1, + 500_000_000, + ))); executor.finish_batch(); } -/// Checks that we can cause the bootloader out of gas error on tip phase. +/// Checks that a tx that is reverted by the VM still can be included into a batch. 
#[db_test] -#[ignore] -async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { +async fn execute_reverted_tx(connection_pool: ConnectionPool) { + let mut alice = Account::random(); + + let tester = Tester::new(connection_pool); + tester.genesis().await; + tester.fund(&[alice.address()]); + let executor = tester.create_batch_executor(); + + let (deploy_tx, loadtest_address) = alice.deploy_loadnext_tx(); + assert_executed(executor.execute_tx(deploy_tx)); + + assert_reverted(executor.execute_tx(alice.loadnext_custom_writes_call( + loadtest_address, + 1, + 1_000_000, // We provide enough gas for tx to be executed, but not enough for the call to be successful. + ))); + executor.finish_batch(); +} + +/// Runs the batch executor through a semi-realistic basic scenario: +/// a batch with different operations, both successful and not. +#[db_test] +async fn execute_realistic_scenario(connection_pool: ConnectionPool) { + let mut alice = Account::random(); + let mut bob = Account::random(); + + let tester = Tester::new(connection_pool); + tester.genesis().await; + tester.fund(&[alice.address()]); + tester.fund(&[bob.address()]); + let executor = tester.create_batch_executor(); + + // A good tx should be executed successfully. + let res = executor.execute_tx(alice.execute()); + assert_executed(res); + + // Execute a good tx successfully, roll if back, and execute it again. + let tx_to_be_rolled_back = alice.execute(); + let res = executor.execute_tx(tx_to_be_rolled_back.clone()); + assert_executed(res); + + executor.rollback_last_tx(); + + let res = executor.execute_tx(tx_to_be_rolled_back.clone()); + assert_executed(res); + + // A good tx from a different account should be executed successfully. + let res = executor.execute_tx(bob.execute()); + assert_executed(res); + + // If we try to execute an already executed again it should be rejected. 
+ let res = executor.execute_tx(tx_to_be_rolled_back); + assert_rejected(res); + + // An unrelated good tx should be executed successfully. + executor.rollback_last_tx(); // Roll back the vm to the pre-rejected-tx state. + + // No need to reset the nonce because a tx with the current nonce was indeed executed. + let res = executor.execute_tx(alice.execute()); + assert_executed(res); + + // A good L1 tx should also be executed successfully. + let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))); + assert_executed(res); + + executor.finish_batch(); +} + +/// Checks that we handle the bootloader out of gas error on execution phase. +#[db_test] +async fn bootloader_out_of_gas_for_any_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); - // Disable the gas limit check. let tester = Tester::with_config( connection_pool, TestConfig { reexecute_each_tx: false, + vm_gas_limit: Some(10), max_allowed_tx_gas_limit: u32::MAX, + validation_computational_gas_limit: u32::MAX, }, ); + tester.genesis().await; tester.fund(&[alice.address()]); + let executor = tester.create_batch_executor(); + + let res = executor.execute_tx(alice.execute()); + + assert_rejected(res.clone()); + assert_matches!(res.err().unwrap(), TxRevertReason::BootloaderOutOfGas); + + executor.finish_batch(); +} +/// Checks that we can handle the bootloader out of gas error on tip phase. +#[db_test] +#[ignore] // This test is blocked by [PLA-50] as gas calculation are affected by the underflow. 
+async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { + let mut alice = Account::random(); + + let mut tester = Tester::new(connection_pool); + + tester.genesis().await; + tester.fund(&[alice.address()]); let executor = tester.create_batch_executor(); - let mut gas_remaining = BLOCK_GAS_LIMIT; - let (deploy_tx, loadnext_address) = alice.deploy_loadnext_tx(); + let res = executor.execute_tx(alice.execute()); + assert_executed(res); + + let vm_block_res = executor.finish_batch(); - let deploy_res = executor.execute_tx(deploy_tx); - assert_executed(deploy_res.clone()); - gas_remaining -= deploy_res.tx_metrics.unwrap().execution_metrics.gas_used as u32; + // Just a bit below the gas used for the previous batch execution should be fine to execute the tx + // but not enough to execute the block tip. + tester.set_config(TestConfig { + reexecute_each_tx: false, + vm_gas_limit: Some(vm_block_res.full_result.gas_used - 10), + max_allowed_tx_gas_limit: u32::MAX, + validation_computational_gas_limit: u32::MAX, + }); - let consume_gas_tx = - alice.loadnext_custom_gas_call(loadnext_address, gas_remaining, gas_remaining); - let res = executor.execute_tx(consume_gas_tx); + let second_executor = tester.create_batch_executor(); + let res = second_executor.execute_tx(alice.execute()); assert!(res.tx_result.is_ok()); assert_matches!( res.bootloader_dry_run_result, Some(Err(TxRevertReason::BootloaderOutOfGas)) ); - executor.finish_batch(); + + second_executor.finish_batch(); } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index a3176bf90fb4..6f97af77a872 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -2,14 +2,17 @@ //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. 
use crate::genesis::chain_schema_genesis; -use crate::state_keeper::batch_executor::BatchExecutorHandle; +use crate::state_keeper::{ + batch_executor::BatchExecutorHandle, + io::L1BatchParams, + tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, +}; use tempfile::TempDir; use vm::{ test_utils::{ get_create_zksync_address, get_deploy_tx, mock_loadnext_gas_burn_call, mock_loadnext_test_call, }, - utils::default_block_properties, vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, zk_evm::{ block_properties::BlockProperties, @@ -40,7 +43,9 @@ const CHAIN_ID: L2ChainId = L2ChainId(270); #[derive(Debug)] pub(super) struct TestConfig { pub(super) reexecute_each_tx: bool, + pub(super) vm_gas_limit: Option, pub(super) max_allowed_tx_gas_limit: u32, + pub(super) validation_computational_gas_limit: u32, } impl TestConfig { @@ -50,7 +55,12 @@ impl TestConfig { Self { reexecute_each_tx: true, + vm_gas_limit: None, max_allowed_tx_gas_limit: config.chain.state_keeper.max_allowed_l2_tx_gas_limit, + validation_computational_gas_limit: config + .chain + .state_keeper + .validation_computational_gas_limit, } } } @@ -79,6 +89,10 @@ impl Tester { } } + pub(super) fn set_config(&mut self, config: TestConfig) { + self.config = config; + } + /// Creates a batch executor instance. /// This function intentionally uses sensible defaults to not introduce boilerplate. pub(super) fn create_batch_executor(&self) -> BatchExecutorHandle { @@ -96,13 +110,18 @@ impl Tester { )); // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden - // for the test pool (see the doc-comment on `TestPool` for detauls). + // for the test pool (see the doc-comment on `TestPool` for details). 
BatchExecutorHandle::new( self.config.reexecute_each_tx, self.config.max_allowed_tx_gas_limit.into(), - block_context, - block_properties, + self.config.validation_computational_gas_limit, secondary_storage, + L1BatchParams { + context_mode: block_context, + properties: block_properties, + base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + }, + self.config.vm_gas_limit, ) } @@ -138,7 +157,13 @@ impl Tester { let mut storage = self.pool.access_storage_blocking(); if storage.blocks_dal().is_genesis_needed() { let chain_id = H256::from_low_u64_be(CHAIN_ID.0 as u64); - chain_schema_genesis(&mut storage, self.fee_account, chain_id).await; + chain_schema_genesis( + &mut storage, + self.fee_account, + chain_id, + BASE_SYSTEM_CONTRACTS.clone(), + ) + .await; } } @@ -223,6 +248,15 @@ impl Account { /// Returns a valid `execute` transaction initiated from L1. /// Does not increment nonce. pub(super) fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction { + let execute = Execute { + contract_address: Address::random(), + value: Default::default(), + calldata: vec![], + factory_deps: None, + }; + + let max_fee_per_gas = U256::from(1u32); + let gas_limit = U256::from(100_100); let priority_op_data = L1TxCommonData { sender: self.address(), canonical_tx_hash: H256::from_low_u64_be(serial_id.0), @@ -230,23 +264,17 @@ impl Account { deadline_block: 100000, layer_2_tip_fee: U256::zero(), full_fee: U256::zero(), - gas_limit: U256::from(100_100), + gas_limit, + max_fee_per_gas, op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, eth_hash: H256::random(), eth_block: 1, gas_per_pubdata_limit: U256::from(1_000_000), - to_mint: U256::zero(), + to_mint: gas_limit * max_fee_per_gas + execute.value, refund_recipient: self.address(), }; - let execute = Execute { - contract_address: Address::random(), - value: Default::default(), - calldata: vec![], - factory_deps: None, - }; - let tx = L1Tx { common_data: priority_op_data, execute, @@ 
-284,13 +312,14 @@ impl Account { &mut self, address: Address, writes: u32, + gas_limit: u32, ) -> Transaction { // For each iteration of the expensive contract, there are two slots that are updated: // the length of the vector and the new slot with the element itself. let minimal_fee = 2 * DEFAULT_GAS_PER_PUBDATA * writes * INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32; - let fee = fee(minimal_fee + 500_000_000); + let fee = fee(minimal_fee + gas_limit); let tx = mock_loadnext_test_call( self.pk, diff --git a/core/bin/zksync_core/src/state_keeper/extractors.rs b/core/bin/zksync_core/src/state_keeper/extractors.rs index 91d838a986ef..b2ad2851d6f9 100644 --- a/core/bin/zksync_core/src/state_keeper/extractors.rs +++ b/core/bin/zksync_core/src/state_keeper/extractors.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::time::{Duration, Instant}; use vm::vm_with_bootloader::{get_bootloader_memory, BlockContextMode, TxExecutionMode}; +use vm::zk_evm::aux_structures::LogQuery; use zksync_dal::StorageProcessor; use zksync_types::block::DeployedContract; use zksync_types::tx::{IncludedTxLocation, TransactionExecutionResult}; @@ -155,20 +156,32 @@ pub(crate) fn get_initial_bootloader_memory( .map(|res| res.operator_suggested_refund) .collect(); + let compressed_bytecodes = updates_accumulator + .executed_transactions + .iter() + .map(|res| res.compressed_bytecodes.clone()) + .collect(); + get_bootloader_memory( transactions_data, refunds, + compressed_bytecodes, TxExecutionMode::VerifyExecute, block_context, ) } -pub(crate) fn log_query_write_read_counts(logs: &[StorageLogQuery]) -> (usize, usize) { +pub(crate) fn storage_log_query_write_read_counts(logs: &[StorageLogQuery]) -> (usize, usize) { let (reads, writes): (Vec<&StorageLogQuery>, Vec<&StorageLogQuery>) = logs.iter().partition(|l| l.log_query.rw_flag); (reads.len(), writes.len()) } +pub(crate) fn log_query_write_read_counts(logs: &[LogQuery]) -> (usize, usize) { + let (reads, writes): (Vec<&LogQuery>, 
Vec<&LogQuery>) = logs.iter().partition(|l| l.rw_flag); + (reads.len(), writes.len()) +} + pub(crate) fn contracts_deployed_this_miniblock( unique_storage_updates: Vec<(StorageKey, (H256, StorageValue))>, storage: &mut StorageProcessor<'_>, diff --git a/core/bin/zksync_core/src/state_keeper/io/common.rs b/core/bin/zksync_core/src/state_keeper/io/common.rs new file mode 100644 index 000000000000..e52dee78e86f --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/io/common.rs @@ -0,0 +1,64 @@ +use std::time::{Duration, Instant}; + +use vm::{ + vm_with_bootloader::{BlockContext, BlockContextMode}, + zk_evm::block_properties::BlockProperties, +}; +use zksync_contracts::BaseSystemContracts; +use zksync_types::{Address, L1BatchNumber, U256, ZKPORTER_IS_AVAILABLE}; +use zksync_utils::h256_to_u256; + +use super::L1BatchParams; + +#[derive(Debug)] +pub(crate) struct StateKeeperStats { + pub(crate) num_contracts: u64, +} + +/// Returns the parameters required to initialize the VM for the next L1 batch. +pub(crate) fn l1_batch_params( + current_l1_batch_number: L1BatchNumber, + operator_address: Address, + l1_batch_timestamp: u64, + previous_block_hash: U256, + l1_gas_price: u64, + fair_l2_gas_price: u64, + base_system_contracts: BaseSystemContracts, +) -> L1BatchParams { + let block_properties = BlockProperties { + default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }; + + let context = BlockContext { + block_number: current_l1_batch_number.0, + block_timestamp: l1_batch_timestamp, + l1_gas_price, + fair_l2_gas_price, + operator_address, + }; + + L1BatchParams { + context_mode: BlockContextMode::NewBlock(context.into(), previous_block_hash), + properties: block_properties, + base_system_contracts, + } +} + +/// Runs the provided closure `f` until it returns `Some` or the `max_wait` time has elapsed. 
+pub(crate) fn poll_until Option>( + delay_interval: Duration, + max_wait: Duration, + mut f: F, +) -> Option { + let wait_interval = delay_interval.min(max_wait); + let start = Instant::now(); + while start.elapsed() <= max_wait { + let res = f(); + if res.is_some() { + return res; + } + std::thread::sleep(wait_interval); + } + None +} diff --git a/core/bin/zksync_core/src/state_keeper/io/mempool.rs b/core/bin/zksync_core/src/state_keeper/io/mempool.rs index b45f5c1f3586..ee439b7ead28 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mempool.rs @@ -2,37 +2,27 @@ use std::sync::Arc; use std::time::Duration; use std::time::Instant; -use vm::utils::default_block_properties; -use vm::vm_with_bootloader::BlockContext; -use vm::vm_with_bootloader::BlockContextMode; +use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; use vm::vm_with_bootloader::DerivedBlockContext; -use vm::zk_evm::block_properties::BlockProperties; use vm::VmBlockResult; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::EthInterface; use zksync_mempool::L2TxFilter; -use zksync_types::block::MiniblockHeader; -use zksync_types::event::{extract_added_tokens, extract_long_l2_to_l1_messages}; -use zksync_types::log_query_sorter::sort_storage_access_queries; -use zksync_types::FAIR_L2_GAS_PRICE; -use zksync_types::{ - block::L1BatchHeader, Address, L1BatchNumber, MiniblockNumber, StorageLogQueryType, - Transaction, U256, -}; -use zksync_utils::{miniblock_hash, time::millis_since_epoch}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; +use zksync_utils::time::millis_since_epoch; use crate::gas_adjuster::GasAdjuster; -use crate::state_keeper::extractors; -use crate::state_keeper::updates::UpdatesManager; -use crate::state_keeper::MempoolGuard; - -use super::PendingBatchData; -use super::StateKeeperIO; - 
-#[derive(Debug)] -struct StateKeeperStats { - num_contracts: u64, -} +use crate::state_keeper::{ + extractors, + io::{ + common::{l1_batch_params, poll_until, StateKeeperStats}, + seal_logic::{seal_l1_batch_impl, seal_miniblock_impl}, + L1BatchParams, PendingBatchData, StateKeeperIO, + }, + updates::UpdatesManager, + MempoolGuard, +}; /// Mempool-based IO for the state keeper. /// Receives transactions from the database through the mempool filtering logic. @@ -46,6 +36,7 @@ pub(crate) struct MempoolIO { current_miniblock_number: MiniblockNumber, current_l1_batch_number: L1BatchNumber, fee_account: Address, + fair_l2_gas_price: u64, delay_interval: Duration, // Grafana metrics @@ -53,6 +44,8 @@ pub(crate) struct MempoolIO { // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. gas_adjuster: Arc>, + + base_system_contracts: BaseSystemContracts, } impl StateKeeperIO for MempoolIO { @@ -79,29 +72,51 @@ impl StateKeeperIO fo &mut storage, self.current_l1_batch_number, ); + + let base_system_contracts = storage.storage_dal().get_base_system_contracts( + pending_miniblock_header + .base_system_contracts_hashes + .bootloader, + pending_miniblock_header + .base_system_contracts_hashes + .default_aa, + ); + vlog::info!("previous_l1_batch_hash: {}", previous_l1_batch_hash); - let params = self.default_block_params( + let params = l1_batch_params( + self.current_l1_batch_number, + self.fee_account, pending_miniblock_header.timestamp, previous_l1_batch_hash, pending_miniblock_header.l1_gas_price, pending_miniblock_header.l2_fair_gas_price, + base_system_contracts, ); let txs = storage.transactions_dal().get_transactions_to_reexecute(); + // Initialize the filter for the transactions that come after the pending batch. + // We use values from the pending block to match the filter with one used before the restart. 
+ let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_miniblock_header.l1_gas_price, + pending_miniblock_header.l2_fair_gas_price, + ); + self.filter = L2TxFilter { + l1_gas_price: pending_miniblock_header.l1_gas_price, + fee_per_gas: base_fee, + gas_per_pubdata: gas_per_pubdata as u32, + }; + Some(PendingBatchData { params, txs }) } - fn wait_for_new_batch_params( - &mut self, - max_wait: Duration, - ) -> Option<(BlockContextMode, BlockProperties)> { + fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option { // Block until at least one transaction in the mempool can match the filter (or timeout happens). // This is needed to ensure that block timestamp is not too old. poll_until(self.delay_interval, max_wait, || { // We create a new filter each time, since parameters may change and a previously // ignored transaction in the mempool may be scheduled for the execution. - self.filter = self.gas_adjuster.l2_tx_filter(); + self.filter = self.gas_adjuster.l2_tx_filter(self.fair_l2_gas_price); self.mempool.has_next(&self.filter).then(|| { // We only need to get the root hash when we're certain that we have a new transaction. 
vlog::info!("getting previous block hash"); @@ -113,17 +128,31 @@ impl StateKeeperIO fo ) }; vlog::info!("previous_l1_batch_hash: {}", previous_l1_batch_hash); + vlog::info!( + "(l1_gas_price,fair_l2_gas_price) for block {} is ({},{})", + self.current_l1_batch_number.0, + self.filter.l1_gas_price, + self.fair_l2_gas_price + ); - self.default_block_params( + l1_batch_params( + self.current_l1_batch_number, + self.fee_account, (millis_since_epoch() / 1000) as u64, previous_l1_batch_hash, self.filter.l1_gas_price, - FAIR_L2_GAS_PRICE, + self.fair_l2_gas_price, + self.base_system_contracts.clone(), ) }) }) } + fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option { + let new_miniblock_timestamp = (millis_since_epoch() / 1000) as u64; + Some(new_miniblock_timestamp) + } + fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { poll_until(self.delay_interval, max_wait, || { let started_at = Instant::now(); @@ -166,12 +195,18 @@ impl StateKeeperIO fo .mark_tx_as_rejected(rejected.hash(), &format!("rejected: {}", error)); } - fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) -> u64 { - let new_miniblock_timestamp = (millis_since_epoch() / 1000) as u64; + fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { let pool = self.pool.clone(); let mut storage = pool.access_storage_blocking(); - self.seal_miniblock_impl(&mut storage, updates_manager, false); - new_miniblock_timestamp + seal_miniblock_impl( + self.current_miniblock_number, + self.current_l1_batch_number, + &mut self.statistics, + &mut storage, + updates_manager, + false, + ); + self.current_miniblock_number += 1; } fn seal_l1_batch( @@ -188,7 +223,18 @@ impl StateKeeperIO fo ); let pool = self.pool.clone(); let mut storage = pool.access_storage_blocking(); - self.seal_l1_batch_impl(&mut storage, block_result, updates_manager, block_context); + seal_l1_batch_impl( + self.current_miniblock_number, + self.current_l1_batch_number, + &mut self.statistics, + 
self.fee_account, + &mut storage, + block_result, + updates_manager, + block_context, + ); + self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. + self.current_l1_batch_number += 1; } } @@ -197,14 +243,21 @@ impl MempoolIO { mempool: MempoolGuard, pool: ConnectionPool, fee_account: Address, + fair_l2_gas_price: u64, delay_interval: Duration, gas_adjuster: Arc>, + base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Self { let mut storage = pool.access_storage_blocking(); let last_sealed_block_header = storage.blocks_dal().get_newest_block_header(); let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number(); let num_contracts = storage.storage_load_dal().load_number_of_contracts(); let filter = L2TxFilter::default(); // Will be initialized properly on the first newly opened batch. + + let base_system_contracts = storage.storage_dal().get_base_system_contracts( + base_system_contracts_hashes.bootloader, + base_system_contracts_hashes.default_aa, + ); drop(storage); Self { @@ -214,9 +267,11 @@ impl MempoolIO { current_l1_batch_number: last_sealed_block_header.number + 1, current_miniblock_number: last_miniblock_number + 1, fee_account, + fair_l2_gas_price, delay_interval, statistics: StateKeeperStats { num_contracts }, gas_adjuster, + base_system_contracts, } } @@ -227,396 +282,4 @@ impl MempoolIO { .unwrap(); last_miniblock_number_included_in_l1_batch + 1 } - - fn miniblock_assertions(&self, updates_manager: &UpdatesManager, is_fictive: bool) { - if is_fictive { - assert!(updates_manager.miniblock.executed_transactions.is_empty()); - } else { - assert!(!updates_manager.miniblock.executed_transactions.is_empty()); - } - - let first_tx_index_in_miniblock = updates_manager.l1_batch.executed_transactions.len(); - let next_tx_index = updates_manager.pending_executed_transactions_len(); - let miniblock_tx_index_range = if is_fictive { - next_tx_index..(next_tx_index + 1) - } else { - 
first_tx_index_in_miniblock..next_tx_index - }; - - for event in updates_manager.miniblock.events.iter() { - assert!(miniblock_tx_index_range.contains(&(event.location.1 as usize))) - } - for storage_log in updates_manager.miniblock.storage_logs.iter() { - assert!(miniblock_tx_index_range - .contains(&(storage_log.log_query.tx_number_in_block as usize))) - } - } - - fn track_l1_batch_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { - metrics::histogram!( - "server.state_keeper.l1_batch.sealed_time_stage", - stage_started_at.elapsed(), - "stage" => stage - ); - *stage_started_at = Instant::now(); - } - - fn track_miniblock_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { - metrics::histogram!( - "server.state_keeper.miniblock.sealed_time_stage", - stage_started_at.elapsed(), - "stage" => stage - ); - *stage_started_at = Instant::now(); - } - - // If `is_fictive` flag is set to true, then it is assumed that - // we should seal a fictive miniblock with no transactions in it. It is needed because - // there might be some storage logs/events that are created - // after the last processed tx in l1 batch. - // For now, there is only one event for sending the fee to the operator.. 
- fn seal_miniblock_impl( - &mut self, - storage: &mut StorageProcessor<'_>, - updates_manager: &UpdatesManager, - is_fictive: bool, - ) { - self.miniblock_assertions(updates_manager, is_fictive); - - let started_at = Instant::now(); - let mut stage_started_at: Instant = Instant::now(); - - let (l1_tx_count, l2_tx_count) = - extractors::l1_l2_tx_count(&updates_manager.miniblock.executed_transactions); - vlog::info!( - "sealing miniblock {} (l1 batch {}) with {} ({} l2 + {} l1) txs, {} events, (writes, reads): {:?}", - self.current_miniblock_number, - self.current_l1_batch_number, - l1_tx_count + l2_tx_count, - l2_tx_count, - l1_tx_count, - updates_manager.miniblock.events.len(), - extractors::log_query_write_read_counts(&updates_manager.miniblock.storage_logs), - ); - - let mut transaction = storage.start_transaction_blocking(); - let miniblock_header = MiniblockHeader { - number: self.current_miniblock_number, - timestamp: updates_manager.miniblock.timestamp, - hash: miniblock_hash(self.current_miniblock_number), - l1_tx_count: l1_tx_count as u16, - l2_tx_count: l2_tx_count as u16, - base_fee_per_gas: updates_manager.base_fee_per_gas(), - l1_gas_price: updates_manager.l1_gas_price(), - l2_fair_gas_price: updates_manager.fair_l2_gas_price(), - }; - - transaction.blocks_dal().insert_miniblock(miniblock_header); - Self::track_miniblock_execution_stage("insert_miniblock_header", &mut stage_started_at); - - transaction - .transactions_dal() - .mark_txs_as_executed_in_miniblock( - self.current_miniblock_number, - &updates_manager.miniblock.executed_transactions, - updates_manager.base_fee_per_gas().into(), - ); - Self::track_miniblock_execution_stage( - "mark_transactions_in_miniblock", - &mut stage_started_at, - ); - - let storage_logs = extractors::log_queries_to_storage_logs( - &updates_manager.miniblock.storage_logs, - updates_manager, - is_fictive, - ); - let write_logs = extractors::write_logs_from_storage_logs(storage_logs); - - transaction - .storage_logs_dal() 
- .insert_storage_logs(self.current_miniblock_number, &write_logs); - Self::track_miniblock_execution_stage("insert_storage_logs", &mut stage_started_at); - - let unique_updates = transaction.storage_dal().apply_storage_logs(&write_logs); - Self::track_miniblock_execution_stage("apply_storage_logs", &mut stage_started_at); - - let new_factory_deps = updates_manager.miniblock.new_factory_deps.clone(); - if !new_factory_deps.is_empty() { - transaction - .storage_dal() - .insert_factory_deps(self.current_miniblock_number, new_factory_deps); - } - Self::track_miniblock_execution_stage("insert_factory_deps", &mut stage_started_at); - - // Factory deps should be inserted before using `contracts_deployed_this_miniblock`. - let deployed_contracts = - extractors::contracts_deployed_this_miniblock(unique_updates, &mut transaction); - if !deployed_contracts.is_empty() { - self.statistics.num_contracts += deployed_contracts.len() as u64; - } - - let added_tokens = extract_added_tokens(&updates_manager.miniblock.events); - if !added_tokens.is_empty() { - transaction.tokens_dal().add_tokens(added_tokens); - } - Self::track_miniblock_execution_stage("insert_tokens", &mut stage_started_at); - - let events_this_miniblock = extractors::extract_events_this_block( - &updates_manager.miniblock.events, - updates_manager, - is_fictive, - ); - transaction - .events_dal() - .save_events(self.current_miniblock_number, events_this_miniblock); - Self::track_miniblock_execution_stage("insert_events", &mut stage_started_at); - - let l2_to_l1_logs_this_miniblock = extractors::extract_l2_to_l1_logs_this_block( - &updates_manager.miniblock.l2_to_l1_logs, - updates_manager, - is_fictive, - ); - transaction - .events_dal() - .save_l2_to_l1_logs(self.current_miniblock_number, l2_to_l1_logs_this_miniblock); - Self::track_miniblock_execution_stage("insert_l2_to_l1_logs", &mut stage_started_at); - - transaction.commit_blocking(); - Self::track_miniblock_execution_stage("commit_miniblock", &mut 
stage_started_at); - - metrics::histogram!( - "server.state_keeper.miniblock.transactions_in_miniblock", - updates_manager.miniblock.executed_transactions.len() as f64 - ); - metrics::histogram!( - "server.miniblock.latency", - ((millis_since_epoch() - updates_manager.miniblock.timestamp as u128 * 1000) as f64) / 1000f64, - "stage" => "sealed" - ); - metrics::histogram!( - "server.state_keeper.miniblock.sealed_time", - started_at.elapsed(), - ); - metrics::gauge!( - "server.miniblock.number", - self.current_miniblock_number.0 as f64, - "stage" => "sealed" - ); - - metrics::gauge!( - "server.state_keeper.storage_contracts_size", - self.statistics.num_contracts as f64 - ); - vlog::debug!( - "sealed miniblock {} in {:?}", - self.current_miniblock_number, - started_at.elapsed() - ); - - Self::track_miniblock_execution_stage( - "apply_miniblock_updates_to_l1_batch_updates_accumulator", - &mut stage_started_at, - ); - self.current_miniblock_number += 1; - } - - fn seal_l1_batch_impl( - &mut self, - storage: &mut StorageProcessor<'_>, - block_result: VmBlockResult, - mut updates_manager: UpdatesManager, - block_context: DerivedBlockContext, - ) { - let started_at = Instant::now(); - let mut stage_started_at: Instant = Instant::now(); - - let mut transaction = storage.start_transaction_blocking(); - - // The vm execution was paused right after the last transaction was executed. - // There is some post-processing work that the VM needs to do before the block is fully processed. - let VmBlockResult { - full_result, - block_tip_result, - } = block_result; - assert!( - full_result.revert_reason.is_none(), - "VM must not revert when finalizing block. Revert reason: {:?}", - full_result.revert_reason - ); - Self::track_l1_batch_execution_stage("vm_finalization", &mut stage_started_at); - - updates_manager - .miniblock - .extend_from_fictive_transaction(block_tip_result.logs); - // Seal fictive miniblock with last events and storage logs. 
- self.seal_miniblock_impl(&mut transaction, &updates_manager, true); - Self::track_l1_batch_execution_stage("fictive_miniblock", &mut stage_started_at); - - let (_, deduped_log_queries) = - sort_storage_access_queries(&full_result.storage_log_queries); - Self::track_l1_batch_execution_stage("log_deduplication", &mut stage_started_at); - - let (l1_tx_count, l2_tx_count) = - extractors::l1_l2_tx_count(&updates_manager.l1_batch.executed_transactions); - vlog::info!( - "sealing l1 batch {:?} with {:?} ({:?} l2 + {:?} l1) txs, {:?} l2_l1_logs, {:?} events, (writes, reads): {:?} , (writes_dedup, reads_dedup): {:?} ", - self.current_l1_batch_number, - l1_tx_count + l2_tx_count, - l2_tx_count, - l1_tx_count, - full_result.l2_to_l1_logs.len(), - full_result.events.len(), - extractors::log_query_write_read_counts(&full_result.storage_log_queries), - extractors::log_query_write_read_counts(&deduped_log_queries), - ); - - let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( - &mut transaction, - self.current_l1_batch_number, - ); - let block_context_properties = BlockContextMode::NewBlock(block_context, hash); - - let l1_batch = L1BatchHeader { - number: self.current_l1_batch_number, - is_finished: true, - timestamp: block_context.context.block_timestamp, - fee_account_address: self.fee_account, - priority_ops_onchain_data: updates_manager.l1_batch.priority_ops_onchain_data.clone(), - l1_tx_count: l1_tx_count as u16, - l2_tx_count: l2_tx_count as u16, - l2_to_l1_logs: full_result.l2_to_l1_logs, - l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), - bloom: Default::default(), - initial_bootloader_contents: extractors::get_initial_bootloader_memory( - &updates_manager.l1_batch, - block_context_properties, - ), - used_contract_hashes: full_result.used_contract_hashes, - base_fee_per_gas: block_context.base_fee, - l1_gas_price: updates_manager.l1_gas_price(), - l2_fair_gas_price: updates_manager.fair_l2_gas_price(), - }; - - transaction - 
.blocks_dal() - .insert_l1_batch(l1_batch, updates_manager.l1_batch.l1_gas_count); - Self::track_l1_batch_execution_stage("insert_l1_batch_header", &mut stage_started_at); - - transaction - .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(self.current_l1_batch_number); - Self::track_l1_batch_execution_stage( - "set_l1_batch_number_for_miniblocks", - &mut stage_started_at, - ); - - transaction - .transactions_dal() - .mark_txs_as_executed_in_l1_batch( - self.current_l1_batch_number, - &updates_manager.l1_batch.executed_transactions, - ); - Self::track_l1_batch_execution_stage( - "mark_txs_as_executed_in_l1_batch", - &mut stage_started_at, - ); - - transaction - .storage_logs_dedup_dal() - .insert_storage_logs(self.current_l1_batch_number, &deduped_log_queries); - Self::track_l1_batch_execution_stage("insert_storage_dedup_logs", &mut stage_started_at); - - let (protective_reads, deduplicated_writes): (Vec<_>, Vec<_>) = deduped_log_queries - .into_iter() - .partition(|log_query| log_query.log_type == StorageLogQueryType::Read); - transaction - .storage_logs_dedup_dal() - .insert_protective_reads(self.current_l1_batch_number, &protective_reads); - Self::track_l1_batch_execution_stage("insert_protective_reads", &mut stage_started_at); - - transaction - .storage_logs_dedup_dal() - .insert_initial_writes(self.current_l1_batch_number, &deduplicated_writes); - Self::track_l1_batch_execution_stage("insert_initial_writes", &mut stage_started_at); - - transaction.commit_blocking(); - Self::track_l1_batch_execution_stage("commit_l1_batch", &mut stage_started_at); - - metrics::histogram!( - "server.state_keeper.l1_batch.updated_storage_keys_len", - updates_manager.l1_batch.modified_storage_keys_number as f64 - ); - metrics::histogram!( - "server.state_keeper.l1_batch.transactions_in_l1_batch", - updates_manager.l1_batch.executed_transactions.len() as f64 - ); - metrics::histogram!( - "server.l1_batch.latency", - ((millis_since_epoch() - 
block_context.context.block_timestamp as u128 * 1000) as f64) / 1000f64, - "stage" => "sealed" - ); - metrics::gauge!( - "server.block_number", - self.current_l1_batch_number.0 as f64, - "stage" => "sealed" - ); - - metrics::histogram!( - "server.state_keeper.l1_batch.sealed_time", - started_at.elapsed(), - ); - vlog::debug!( - "sealed l1 batch {} in {:?}", - self.current_l1_batch_number, - started_at.elapsed() - ); - - self.current_l1_batch_number += 1; - } - - fn default_block_params( - &self, - l1_batch_timestamp: u64, - previous_block_hash: U256, - l1_gas_price: u64, - fair_l2_gas_price: u64, - ) -> (BlockContextMode, BlockProperties) { - vlog::info!( - "(l1_gas_price,fair_l2_gas_price) for block {} is ({l1_gas_price},{fair_l2_gas_price}", - self.current_l1_batch_number.0 - ); - - let block_properties = default_block_properties(); - - let context = BlockContext { - block_number: self.current_l1_batch_number.0, - block_timestamp: l1_batch_timestamp, - l1_gas_price, - fair_l2_gas_price, - operator_address: self.fee_account, - }; - - ( - BlockContextMode::NewBlock(context.into(), previous_block_hash), - block_properties, - ) - } -} - -fn poll_until Option>( - delay_interval: Duration, - max_wait: Duration, - mut f: F, -) -> Option { - let wait_interval = delay_interval.min(max_wait); - let start = Instant::now(); - while start.elapsed() <= max_wait { - let res = f(); - if res.is_some() { - return res; - } - std::thread::sleep(wait_interval); - } - None } diff --git a/core/bin/zksync_core/src/state_keeper/io/mod.rs b/core/bin/zksync_core/src/state_keeper/io/mod.rs index 88c809c23e51..1b567e123012 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mod.rs @@ -6,11 +6,24 @@ use vm::zk_evm::block_properties::BlockProperties; use vm::VmBlockResult; use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction}; -use crate::state_keeper::updates::UpdatesManager; +use super::updates::UpdatesManager; pub(crate) use 
mempool::MempoolIO; +use zksync_contracts::BaseSystemContracts; -mod mempool; +pub(crate) mod common; +pub(crate) mod mempool; +pub(crate) mod seal_logic; + +/// System parameters for L1 batch. +/// It includes system params such as Basic System Contracts and zkPorter configuration +/// And l1batch-specific parameters like timestamp, number, etc. +#[derive(Debug, Clone)] +pub struct L1BatchParams { + pub context_mode: BlockContextMode, + pub properties: BlockProperties, + pub base_system_contracts: BaseSystemContracts, +} /// Contains information about the un-synced execution state: /// Batch data and transactions that were executed before and are marked as so in the DB, @@ -21,10 +34,10 @@ mod mempool; /// /// Invariant is that there may be not more than 1 pending batch, and it's always the latest batch. #[derive(Debug)] -pub(crate) struct PendingBatchData { +pub struct PendingBatchData { /// Data used to initialize the pending batch. We have to make sure that all the parameters /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. - pub(crate) params: (BlockContextMode, BlockProperties), + pub(crate) params: L1BatchParams, /// List of miniblocks and corresponding transactions that were executed within batch. pub(crate) txs: Vec<(MiniblockNumber, Vec)>, } @@ -32,7 +45,7 @@ pub(crate) struct PendingBatchData { /// `StateKeeperIO` provides the interactive layer for the state keeper: /// it's used to receive volatile parameters (such as batch parameters), and also it's used to perform /// mutable operations on the persistent state (e.g. persist executed batches). -pub(crate) trait StateKeeperIO: 'static + std::fmt::Debug + Send { +pub trait StateKeeperIO: 'static + std::fmt::Debug + Send { /// Returns the number of the currently processed L1 batch. fn current_l1_batch_number(&self) -> L1BatchNumber; /// Returns the number of the currently processed miniblock (aka L2 block). 
@@ -42,10 +55,10 @@ pub(crate) trait StateKeeperIO: 'static + std::fmt::Debug + Send { fn load_pending_batch(&mut self) -> Option; /// Blocks for up to `max_wait` until the parameters for the next L1 batch are available. /// Returns the data required to initialize the VM for the next batch. - fn wait_for_new_batch_params( - &mut self, - max_wait: Duration, - ) -> Option<(BlockContextMode, BlockProperties)>; + fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option; + /// Blocks for up to `max_wait` until the parameters for the next miniblock are available. + /// Right now it's only a timestamp. + fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option; /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option; @@ -55,7 +68,7 @@ pub(crate) trait StateKeeperIO: 'static + std::fmt::Debug + Send { fn reject(&mut self, tx: &Transaction, error: &str); /// Marks the miniblock (aka L2 block) as sealed. /// Returns the timestamp for the next miniblock. - fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) -> u64; + fn seal_miniblock(&mut self, updates_manager: &UpdatesManager); /// Marks the L1 batch as sealed. fn seal_l1_batch( &mut self, diff --git a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs new file mode 100644 index 000000000000..3fbe18360a39 --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs @@ -0,0 +1,384 @@ +//! This module is a source-of-truth on what is expected to be done when sealing a block. +//! It contains the logic of the block sealing, which is used by both the mempool-based and external node IO. 
+ +use std::time::Instant; + +use vm::vm_with_bootloader::BlockContextMode; +use vm::vm_with_bootloader::DerivedBlockContext; +use vm::VmBlockResult; +use zksync_dal::StorageProcessor; +use zksync_types::{ + block::L1BatchHeader, + block::MiniblockHeader, + event::{extract_added_tokens, extract_long_l2_to_l1_messages}, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + Address, L1BatchNumber, MiniblockNumber, +}; +use zksync_utils::{miniblock_hash, time::millis_since_epoch}; + +use crate::state_keeper::{extractors, io::common::StateKeeperStats, updates::UpdatesManager}; + +/// Persists an L1 batch in the storage. +/// This action includes a creation of an empty "fictive" miniblock that contains the events +/// generated during the bootloader "tip phase". +#[allow(clippy::too_many_arguments)] +pub(crate) fn seal_l1_batch_impl( + current_miniblock_number: MiniblockNumber, + current_l1_batch_number: L1BatchNumber, + statistics: &mut StateKeeperStats, + fee_account: Address, + storage: &mut StorageProcessor<'_>, + block_result: VmBlockResult, + mut updates_manager: UpdatesManager, + block_context: DerivedBlockContext, +) { + let started_at = Instant::now(); + let mut stage_started_at: Instant = Instant::now(); + + let mut transaction = storage.start_transaction_blocking(); + + // The vm execution was paused right after the last transaction was executed. + // There is some post-processing work that the VM needs to do before the block is fully processed. + let VmBlockResult { + full_result, + block_tip_result, + } = block_result; + assert!( + full_result.revert_reason.is_none(), + "VM must not revert when finalizing block. Revert reason: {:?}", + full_result.revert_reason + ); + track_l1_batch_execution_stage("vm_finalization", &mut stage_started_at); + + updates_manager.extend_from_fictive_transaction(block_tip_result.logs); + // Seal fictive miniblock with last events and storage logs. 
+ seal_miniblock_impl( + current_miniblock_number, + current_l1_batch_number, + statistics, + &mut transaction, + &updates_manager, + true, + ); + track_l1_batch_execution_stage("fictive_miniblock", &mut stage_started_at); + + let (_, deduped_log_queries) = sort_storage_access_queries( + full_result + .storage_log_queries + .iter() + .map(|log| &log.log_query), + ); + track_l1_batch_execution_stage("log_deduplication", &mut stage_started_at); + + let (l1_tx_count, l2_tx_count) = + extractors::l1_l2_tx_count(&updates_manager.l1_batch.executed_transactions); + vlog::info!( + "sealing l1 batch {:?} with {:?} ({:?} l2 + {:?} l1) txs, {:?} l2_l1_logs, {:?} events, (writes, reads): {:?} , (writes_dedup, reads_dedup): {:?} ", + current_l1_batch_number, + l1_tx_count + l2_tx_count, + l2_tx_count, + l1_tx_count, + full_result.l2_to_l1_logs.len(), + full_result.events.len(), + extractors::storage_log_query_write_read_counts(&full_result.storage_log_queries), + extractors::log_query_write_read_counts(&deduped_log_queries), + ); + + let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( + &mut transaction, + current_l1_batch_number, + ); + let block_context_properties = BlockContextMode::NewBlock(block_context, hash); + + let l1_batch = L1BatchHeader { + number: current_l1_batch_number, + is_finished: true, + timestamp: block_context.context.block_timestamp, + fee_account_address: fee_account, + priority_ops_onchain_data: updates_manager.l1_batch.priority_ops_onchain_data.clone(), + l1_tx_count: l1_tx_count as u16, + l2_tx_count: l2_tx_count as u16, + l2_to_l1_logs: full_result.l2_to_l1_logs, + l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), + bloom: Default::default(), + initial_bootloader_contents: extractors::get_initial_bootloader_memory( + &updates_manager.l1_batch, + block_context_properties, + ), + used_contract_hashes: full_result.used_contract_hashes, + base_fee_per_gas: block_context.base_fee, + l1_gas_price: 
updates_manager.l1_gas_price(), + l2_fair_gas_price: updates_manager.fair_l2_gas_price(), + base_system_contracts_hashes: updates_manager.base_system_contract_hashes(), + }; + + transaction + .blocks_dal() + .insert_l1_batch(l1_batch, updates_manager.l1_batch.l1_gas_count); + track_l1_batch_execution_stage("insert_l1_batch_header", &mut stage_started_at); + + transaction + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(current_l1_batch_number); + track_l1_batch_execution_stage("set_l1_batch_number_for_miniblocks", &mut stage_started_at); + + transaction + .transactions_dal() + .mark_txs_as_executed_in_l1_batch( + current_l1_batch_number, + &updates_manager.l1_batch.executed_transactions, + ); + track_l1_batch_execution_stage("mark_txs_as_executed_in_l1_batch", &mut stage_started_at); + + transaction + .storage_logs_dedup_dal() + .insert_storage_logs(current_l1_batch_number, &deduped_log_queries); + track_l1_batch_execution_stage("insert_storage_dedup_logs", &mut stage_started_at); + + let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries + .into_iter() + .partition(|log_query| log_query.rw_flag); + transaction + .storage_logs_dedup_dal() + .insert_protective_reads(current_l1_batch_number, &protective_reads); + track_l1_batch_execution_stage("insert_protective_reads", &mut stage_started_at); + + transaction + .storage_logs_dedup_dal() + .insert_initial_writes(current_l1_batch_number, &deduplicated_writes); + track_l1_batch_execution_stage("insert_initial_writes", &mut stage_started_at); + + transaction.commit_blocking(); + track_l1_batch_execution_stage("commit_l1_batch", &mut stage_started_at); + + let writes_metrics = updates_manager.storage_writes_deduplicator.metrics(); + // Sanity check. 
+ assert_eq!( + deduplicated_writes.len(), + writes_metrics.initial_storage_writes + writes_metrics.repeated_storage_writes, + "Results of in-flight and common deduplications are mismatched" + ); + metrics::histogram!( + "server.state_keeper.l1_batch.initial_writes", + writes_metrics.initial_storage_writes as f64 + ); + metrics::histogram!( + "server.state_keeper.l1_batch.repeated_writes", + writes_metrics.repeated_storage_writes as f64 + ); + + metrics::histogram!( + "server.state_keeper.l1_batch.transactions_in_l1_batch", + updates_manager.l1_batch.executed_transactions.len() as f64 + ); + metrics::histogram!( + "server.l1_batch.latency", + ((millis_since_epoch() - block_context.context.block_timestamp as u128 * 1000) as f64) / 1000f64, + "stage" => "sealed" + ); + + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_time", + started_at.elapsed(), + ); + vlog::debug!( + "sealed l1 batch {} in {:?}", + current_l1_batch_number, + started_at.elapsed() + ); +} + +// Seal miniblock with the given number. +// +// If `is_fictive` flag is set to true, then it is assumed that we should seal a fictive miniblock with no transactions +// in it. It is needed because there might be some storage logs/events that are created after the last processed tx in +// l1 batch: after the last transaction is processed, bootloader enters the "tip" phase in which it can still generate +// events (e.g. one for sending fees to the operator). 
+pub(crate) fn seal_miniblock_impl( + current_miniblock_number: MiniblockNumber, + current_l1_batch_number: L1BatchNumber, + statistics: &mut StateKeeperStats, + storage: &mut StorageProcessor<'_>, + updates_manager: &UpdatesManager, + is_fictive: bool, +) { + miniblock_assertions(updates_manager, is_fictive); + + let started_at = Instant::now(); + let mut stage_started_at: Instant = Instant::now(); + + let (l1_tx_count, l2_tx_count) = + extractors::l1_l2_tx_count(&updates_manager.miniblock.executed_transactions); + vlog::info!( + "sealing miniblock {} (l1 batch {}) with {} ({} l2 + {} l1) txs, {} events, (writes, reads): {:?}", + current_miniblock_number, + current_l1_batch_number, + l1_tx_count + l2_tx_count, + l2_tx_count, + l1_tx_count, + updates_manager.miniblock.events.len(), + extractors::storage_log_query_write_read_counts(&updates_manager.miniblock.storage_logs), + ); + + let mut transaction = storage.start_transaction_blocking(); + let miniblock_header = MiniblockHeader { + number: current_miniblock_number, + timestamp: updates_manager.miniblock.timestamp, + hash: miniblock_hash(current_miniblock_number), + l1_tx_count: l1_tx_count as u16, + l2_tx_count: l2_tx_count as u16, + base_fee_per_gas: updates_manager.base_fee_per_gas(), + l1_gas_price: updates_manager.l1_gas_price(), + l2_fair_gas_price: updates_manager.fair_l2_gas_price(), + base_system_contracts_hashes: updates_manager.base_system_contract_hashes(), + }; + + transaction.blocks_dal().insert_miniblock(miniblock_header); + track_miniblock_execution_stage("insert_miniblock_header", &mut stage_started_at); + + transaction + .transactions_dal() + .mark_txs_as_executed_in_miniblock( + current_miniblock_number, + &updates_manager.miniblock.executed_transactions, + updates_manager.base_fee_per_gas().into(), + ); + track_miniblock_execution_stage("mark_transactions_in_miniblock", &mut stage_started_at); + + let storage_logs = extractors::log_queries_to_storage_logs( + 
&updates_manager.miniblock.storage_logs, + updates_manager, + is_fictive, + ); + let write_logs = extractors::write_logs_from_storage_logs(storage_logs); + + transaction + .storage_logs_dal() + .insert_storage_logs(current_miniblock_number, &write_logs); + track_miniblock_execution_stage("insert_storage_logs", &mut stage_started_at); + + let unique_updates = transaction.storage_dal().apply_storage_logs(&write_logs); + track_miniblock_execution_stage("apply_storage_logs", &mut stage_started_at); + + let new_factory_deps = updates_manager.miniblock.new_factory_deps.clone(); + if !new_factory_deps.is_empty() { + transaction + .storage_dal() + .insert_factory_deps(current_miniblock_number, new_factory_deps); + } + track_miniblock_execution_stage("insert_factory_deps", &mut stage_started_at); + + // Factory deps should be inserted before using `contracts_deployed_this_miniblock`. + let deployed_contracts = + extractors::contracts_deployed_this_miniblock(unique_updates, &mut transaction); + if !deployed_contracts.is_empty() { + statistics.num_contracts += deployed_contracts.len() as u64; + } + + let added_tokens = extract_added_tokens(&updates_manager.miniblock.events); + if !added_tokens.is_empty() { + transaction.tokens_dal().add_tokens(added_tokens); + } + track_miniblock_execution_stage("insert_tokens", &mut stage_started_at); + + let events_this_miniblock = extractors::extract_events_this_block( + &updates_manager.miniblock.events, + updates_manager, + is_fictive, + ); + transaction + .events_dal() + .save_events(current_miniblock_number, events_this_miniblock); + track_miniblock_execution_stage("insert_events", &mut stage_started_at); + + let l2_to_l1_logs_this_miniblock = extractors::extract_l2_to_l1_logs_this_block( + &updates_manager.miniblock.l2_to_l1_logs, + updates_manager, + is_fictive, + ); + transaction + .events_dal() + .save_l2_to_l1_logs(current_miniblock_number, l2_to_l1_logs_this_miniblock); + track_miniblock_execution_stage("insert_l2_to_l1_logs", 
&mut stage_started_at); + + transaction.commit_blocking(); + track_miniblock_execution_stage("commit_miniblock", &mut stage_started_at); + + metrics::histogram!( + "server.state_keeper.miniblock.transactions_in_miniblock", + updates_manager.miniblock.executed_transactions.len() as f64 + ); + metrics::histogram!( + "server.miniblock.latency", + ((millis_since_epoch() - updates_manager.miniblock.timestamp as u128 * 1000) as f64) / 1000f64, + "stage" => "sealed" + ); + metrics::histogram!( + "server.state_keeper.miniblock.sealed_time", + started_at.elapsed(), + ); + metrics::gauge!( + "server.miniblock.number", + current_miniblock_number.0 as f64, + "stage" => "sealed" + ); + + metrics::gauge!( + "server.state_keeper.storage_contracts_size", + statistics.num_contracts as f64 + ); + vlog::debug!( + "sealed miniblock {} in {:?}", + current_miniblock_number, + started_at.elapsed() + ); + + track_miniblock_execution_stage( + "apply_miniblock_updates_to_l1_batch_updates_accumulator", + &mut stage_started_at, + ); +} + +/// Performs several sanity checks to make sure that the miniblock is valid. 
+fn miniblock_assertions(updates_manager: &UpdatesManager, is_fictive: bool) { + if is_fictive { + assert!(updates_manager.miniblock.executed_transactions.is_empty()); + } else { + assert!(!updates_manager.miniblock.executed_transactions.is_empty()); + } + + let first_tx_index_in_miniblock = updates_manager.l1_batch.executed_transactions.len(); + let next_tx_index = updates_manager.pending_executed_transactions_len(); + let miniblock_tx_index_range = if is_fictive { + next_tx_index..(next_tx_index + 1) + } else { + first_tx_index_in_miniblock..next_tx_index + }; + + for event in updates_manager.miniblock.events.iter() { + assert!(miniblock_tx_index_range.contains(&(event.location.1 as usize))) + } + for storage_log in updates_manager.miniblock.storage_logs.iter() { + assert!( + miniblock_tx_index_range.contains(&(storage_log.log_query.tx_number_in_block as usize)) + ) + } +} + +fn track_l1_batch_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_time_stage", + stage_started_at.elapsed(), + "stage" => stage + ); + *stage_started_at = Instant::now(); +} + +fn track_miniblock_execution_stage(stage: &'static str, stage_started_at: &mut Instant) { + metrics::histogram!( + "server.state_keeper.miniblock.sealed_time_stage", + stage_started_at.elapsed(), + "stage" => stage + ); + *stage_started_at = Instant::now(); +} diff --git a/core/bin/zksync_core/src/state_keeper/keeper.rs b/core/bin/zksync_core/src/state_keeper/keeper.rs index 911c42fea899..e8340c6fdd3e 100644 --- a/core/bin/zksync_core/src/state_keeper/keeper.rs +++ b/core/bin/zksync_core/src/state_keeper/keeper.rs @@ -2,14 +2,17 @@ use std::time::Duration; use tokio::sync::watch::Receiver; -use vm::{ - vm_with_bootloader::BlockContextMode, zk_evm::block_properties::BlockProperties, TxRevertReason, +use vm::transaction_data::TransactionData; +use vm::TxRevertReason; +use zksync_types::{ + 
storage_writes_deduplicator::StorageWritesDeduplicator, MiniblockNumber, Transaction, }; -use zksync_types::{MiniblockNumber, Transaction}; +use zksync_utils::time::millis_since_epoch; +use crate::gas_tracker::gas_count_from_writes; use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, L1BatchExecutorBuilder, TxExecutionResult}, - io::{PendingBatchData, StateKeeperIO}, + io::{L1BatchParams, PendingBatchData, StateKeeperIO}, seal_criteria::{SealManager, SealResolution}, types::ExecutionMetricsForCriteria, updates::UpdatesManager, @@ -17,7 +20,7 @@ use crate::state_keeper::{ /// Amount of time to block on waiting for some resource. The exact value is not really important, /// we only need it to not block on waiting indefinitely and be able to process cancellation requests. -const POLL_WAIT_DURATION: Duration = Duration::from_secs(1); +pub(super) const POLL_WAIT_DURATION: Duration = Duration::from_secs(1); /// Structure used to indicate that task cancellation was requested. #[derive(Debug)] @@ -33,7 +36,7 @@ struct Canceled; /// You can think of it as a state machine that runs over a sequence of incoming transactions, turning them into /// a sequence of executed miniblocks and batches. 
#[derive(Debug)] -pub(crate) struct ZkSyncStateKeeper { +pub struct ZkSyncStateKeeper { stop_receiver: Receiver, io: Box, batch_executor_base: Box, @@ -41,7 +44,7 @@ pub(crate) struct ZkSyncStateKeeper { } impl ZkSyncStateKeeper { - pub(crate) fn new( + pub fn new( stop_receiver: Receiver, io: Box, batch_executor_base: Box, @@ -101,13 +104,14 @@ impl ZkSyncStateKeeper { } }; - let (mut block_context, mut block_properties) = params; + let mut l1_batch_params = params; - let mut updates_manager = UpdatesManager::new(&block_context); + let mut updates_manager = UpdatesManager::new( + &l1_batch_params.context_mode, + l1_batch_params.base_system_contracts.hashes(), + ); - let mut batch_executor = self - .batch_executor_base - .init_batch(block_context, block_properties); + let mut batch_executor = self.batch_executor_base.init_batch(l1_batch_params.clone()); self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute); loop { @@ -118,22 +122,26 @@ impl ZkSyncStateKeeper { // Finish current batch. if !updates_manager.miniblock.executed_transactions.is_empty() { - let new_timestamp = self.io.seal_miniblock(&updates_manager); - updates_manager.seal_miniblock(new_timestamp); + self.io.seal_miniblock(&updates_manager); + // We've sealed the miniblock that we had, but we still need to setup the timestamp for the + // fictive miniblock. + let fictive_miniblock_timestamp = self.wait_for_new_miniblock_params()?; + updates_manager.seal_miniblock(fictive_miniblock_timestamp); } let block_result = batch_executor.finish_batch(); self.io.seal_l1_batch( block_result, updates_manager, - block_context.inner_block_context(), + l1_batch_params.context_mode.inner_block_context(), ); // Start the new batch. 
- (block_context, block_properties) = self.wait_for_new_batch_params()?; - updates_manager = UpdatesManager::new(&block_context); - batch_executor = self - .batch_executor_base - .init_batch(block_context, block_properties); + l1_batch_params = self.wait_for_new_batch_params()?; + updates_manager = UpdatesManager::new( + &l1_batch_params.context_mode, + l1_batch_params.base_system_contracts.hashes(), + ); + batch_executor = self.batch_executor_base.init_batch(l1_batch_params.clone()); } } @@ -144,9 +152,7 @@ impl ZkSyncStateKeeper { Ok(()) } - fn wait_for_new_batch_params( - &mut self, - ) -> Result<(BlockContextMode, BlockProperties), Canceled> { + fn wait_for_new_batch_params(&mut self) -> Result { let params = loop { if let Some(params) = self.io.wait_for_new_batch_params(POLL_WAIT_DURATION) { break params; @@ -156,6 +162,16 @@ impl ZkSyncStateKeeper { Ok(params) } + fn wait_for_new_miniblock_params(&mut self) -> Result { + let params = loop { + if let Some(params) = self.io.wait_for_new_miniblock_params(POLL_WAIT_DURATION) { + break params; + } + self.check_if_cancelled()?; + }; + Ok(params) + } + /// Applies the "pending state" on the `UpdatesManager`. /// Pending state means transactions that were executed before the server restart. Before we continue processing the /// batch, we need to restore the state. We must ensure that every transaction is executed successfully. 
@@ -165,8 +181,6 @@ impl ZkSyncStateKeeper { updates_manager: &mut UpdatesManager, txs_to_reexecute: Vec<(MiniblockNumber, Vec)>, ) { - let mut total_batch_updated_slots = 0; - for (miniblock_number, txs) in txs_to_reexecute { vlog::info!( "Starting to reexecute transactions from sealed miniblock {}", @@ -186,22 +200,20 @@ impl ZkSyncStateKeeper { let tx_execution_status = tx_execution_result.status; let ExecutionMetricsForCriteria { - storage_updates: storage_updates_this_tx, l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, } = result.tx_metrics.unwrap(); - total_batch_updated_slots += storage_updates_this_tx; updates_manager.extend_from_executed_transaction( &tx, tx_execution_result, + result.compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, ); vlog::debug!( "finished reexecuting tx {} by {} (is_l1: {}) (#{} in l1 batch {}) \ - (#{} in miniblock {}) status: {:?}. New modified storage slots: {}, \ - total in l1 batch: {}, L1 gas spent: {:?}, total in l1 batch: {:?}, \ + (#{} in miniblock {}) status: {:?}. L1 gas spent: {:?}, total in l1 batch: {:?}, \ tx execution metrics: {:?}, block execution metrics: {:?}", tx.hash(), tx.initiator_account(), @@ -211,8 +223,6 @@ impl ZkSyncStateKeeper { updates_manager.miniblock.executed_transactions.len(), miniblock_number, tx_execution_status, - storage_updates_this_tx, - total_batch_updated_slots, tx_l1_gas_this_tx, updates_manager.pending_l1_gas_count(), &tx_execution_metrics, @@ -220,9 +230,11 @@ impl ZkSyncStateKeeper { ); } - // It's OK to use substitute values here even though we're re-executing the old blocks, - // since the correct values are already persisted in the DB and won't be overwritten. - updates_manager.seal_miniblock(updates_manager.batch_timestamp()); + // For old miniblocks that we reexecute the correct timestamps are already persisted in the DB and won't be overwritten. 
+ // However, `seal_miniblock` method of `UpdatesManager` takes the only parameter `new_miniblock_timestamp` + // that will be used as a timestamp for the next sealed miniblock. + // So, we should care about passing the correct timestamp for miniblock that comes after the pending batch. + updates_manager.seal_miniblock((millis_since_epoch() / 1000) as u64); + } } @@ -239,6 +251,11 @@ impl ZkSyncStateKeeper { { return Ok(()); } + if self.sealer.should_seal_miniblock(updates_manager) { + self.io.seal_miniblock(updates_manager); + let new_timestamp = self.wait_for_new_miniblock_params()?; + updates_manager.seal_miniblock(new_timestamp); + } let Some(tx) = self.io.wait_for_next_tx(POLL_WAIT_DURATION) else { vlog::trace!("No new transactions. Waiting!"); continue; }; @@ -257,13 +274,10 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( &tx, exec_result.tx_result.unwrap(), + exec_result.compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, ); - if self.sealer.should_seal_miniblock(updates_manager) { - let new_timestamp = self.io.seal_miniblock(updates_manager); - updates_manager.seal_miniblock(new_timestamp); - } } SealResolution::IncludeAndSeal => { let ExecutionMetricsForCriteria { @@ -274,6 +288,7 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( &tx, exec_result.tx_result.unwrap(), + exec_result.compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, ); @@ -297,12 +312,14 @@ impl ZkSyncStateKeeper { /// Executes one transaction in the batch executor, and then decides whether the batch should be sealed. /// Batch may be sealed because of one of the following reasons: /// 1. The VM entered an incorrect state (e.g. out of gas). In that case, we must revert the transaction and seal - /// the blcok. + /// the block. + /// 2. Seal manager decided that batch is ready to be sealed. + /// Note: this method doesn't mutate `updates_manager` in the end. 
However, reference should be mutable + /// because we use `apply_and_rollback` method of `updates_manager.storage_writes_deduplicator`. fn process_one_tx( &mut self, batch_executor: &BatchExecutorHandle, - updates_manager: &UpdatesManager, + updates_manager: &mut UpdatesManager, tx: &Transaction, ) -> (SealResolution, TxExecutionResult) { let exec_result = batch_executor.execute_tx(tx.clone()); @@ -311,6 +328,7 @@ impl ZkSyncStateKeeper { bootloader_dry_run_result, tx_metrics, bootloader_dry_run_metrics, + .. } = exec_result.clone(); match tx_result { @@ -329,14 +347,13 @@ impl ZkSyncStateKeeper { Ok(tx_execution_result) => { let tx_execution_status = tx_execution_result.status; let ExecutionMetricsForCriteria { - storage_updates: storage_updates_this_tx, l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, } = tx_metrics.unwrap(); vlog::debug!( "finished tx {:?} by {:?} (is_l1: {}) (#{} in l1 batch {}) (#{} in miniblock {}) \ - status: {:?}. New modified storage slots: {}, L1 gas spent: {:?}, total in l1 batch: {:?}, \ + status: {:?}. L1 gas spent: {:?}, total in l1 batch: {:?}, \ tx execution metrics: {:?}, block execution metrics: {:?}", tx.hash(), tx.initiator_account(), @@ -346,22 +363,24 @@ impl ZkSyncStateKeeper { updates_manager.miniblock.executed_transactions.len() + 1, self.io.current_miniblock_number().0, tx_execution_status, - storage_updates_this_tx, tx_l1_gas_this_tx, updates_manager.pending_l1_gas_count() + tx_l1_gas_this_tx, &tx_execution_metrics, updates_manager.pending_execution_metrics() + tx_execution_metrics, ); - if bootloader_dry_run_result.unwrap().is_err() { - // Exclude and seal. 
- metrics::increment_counter!( - "server.tx_aggregation.reason", - "criterion" => "bootloader_block_tip_failed", - "seal_resolution" => "exclude_and_seal", - ); - return (SealResolution::ExcludeAndSeal, exec_result); - } + let bootloader_dry_run_result = + if let Ok(bootloader_dry_run_result) = bootloader_dry_run_result.unwrap() { + bootloader_dry_run_result + } else { + // Exclude and seal. + metrics::increment_counter!( + "server.tx_aggregation.reason", + "criterion" => "bootloader_block_tip_failed", + "seal_resolution" => "exclude_and_seal", + ); + return (SealResolution::ExcludeAndSeal, exec_result); + }; let ExecutionMetricsForCriteria { l1_gas: finish_block_l1_gas, @@ -369,6 +388,24 @@ impl ZkSyncStateKeeper { .. } = bootloader_dry_run_metrics.unwrap(); + let tx_data: TransactionData = tx.clone().into(); + let encoding_len = tx_data.into_tokens().len(); + + let logs_to_apply_iter = tx_execution_result + .result + .logs + .storage_logs + .iter() + .chain(&bootloader_dry_run_result.logs.storage_logs); + let block_writes_metrics = updates_manager + .storage_writes_deduplicator + .apply_and_rollback(logs_to_apply_iter.clone()); + let block_writes_l1_gas = gas_count_from_writes(&block_writes_metrics); + + let tx_writes_metrics = + StorageWritesDeduplicator::apply_on_empty_state(logs_to_apply_iter); + let tx_writes_l1_gas = gas_count_from_writes(&tx_writes_metrics); + let resolution = self.sealer.should_seal_l1_batch( self.io.current_l1_batch_number().0, updates_manager.batch_timestamp() as u128 * 1000, @@ -379,8 +416,13 @@ impl ZkSyncStateKeeper { tx_execution_metrics + finish_block_execution_metrics, updates_manager.pending_l1_gas_count() + tx_l1_gas_this_tx - + finish_block_l1_gas, - tx_l1_gas_this_tx + finish_block_l1_gas, + + finish_block_l1_gas + + block_writes_l1_gas, + tx_l1_gas_this_tx + finish_block_l1_gas + tx_writes_l1_gas, + updates_manager.pending_txs_encoding_size() + encoding_len, + encoding_len, + block_writes_metrics, + tx_writes_metrics, ); 
(resolution, exec_result) diff --git a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs index 73ae4e280a1a..27c6e35b0793 100644 --- a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs @@ -35,6 +35,7 @@ impl MempoolFetcher { pool: ConnectionPool, remove_stuck_txs: bool, stuck_tx_timeout: Duration, + fair_l2_gas_price: u64, stop_receiver: watch::Receiver, ) { { @@ -56,7 +57,7 @@ impl MempoolFetcher { let started_at = Instant::now(); let mut storage = pool.access_storage().await; let mempool_info = self.mempool.get_mempool_info(); - let l2_tx_filter = self.gas_adjuster.l2_tx_filter(); + let l2_tx_filter = self.gas_adjuster.l2_tx_filter(fair_l2_gas_price); let (transactions, nonces) = storage.transactions_dal().sync_mempool( mempool_info.stashed_accounts, diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs index 6d3214ae7390..11d8f51799b5 100644 --- a/core/bin/zksync_core/src/state_keeper/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/mod.rs @@ -4,6 +4,7 @@ use tokio::sync::watch::Receiver; use zksync_config::constants::MAX_TXS_IN_BLOCK; use zksync_config::ZkSyncConfig; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; use zksync_eth_client::EthInterface; @@ -12,20 +13,18 @@ use self::io::MempoolIO; use crate::gas_adjuster::GasAdjuster; use crate::state_keeper::seal_criteria::SealManager; -pub(crate) use self::{ - keeper::ZkSyncStateKeeper, mempool_actor::MempoolFetcher, types::MempoolGuard, -}; +pub use self::{keeper::ZkSyncStateKeeper, types::MempoolGuard}; -mod batch_executor; +pub(crate) mod batch_executor; mod extractors; -mod io; +pub(crate) mod io; mod keeper; pub(crate) mod mempool_actor; -pub(crate) mod seal_criteria; +pub mod seal_criteria; #[cfg(test)] mod tests; -mod types; -mod updates; +pub(crate) mod types; +pub(crate) mod updates; 
pub(crate) fn start_state_keeper( config: &ZkSyncConfig, @@ -47,13 +46,19 @@ where pool.clone(), config.chain.state_keeper.reexecute_each_tx, config.chain.state_keeper.max_allowed_l2_tx_gas_limit.into(), + config.chain.state_keeper.validation_computational_gas_limit, ); let io = MempoolIO::new( mempool, pool.clone(), config.chain.state_keeper.fee_account_addr, + config.chain.state_keeper.fair_l2_gas_price, config.chain.operations_manager.delay_interval(), gas_adjuster, + BaseSystemContractsHashes { + bootloader: config.chain.state_keeper.bootloader_hash, + default_aa: config.chain.state_keeper.default_aa_hash, + }, ); let sealer = SealManager::new(config.chain.state_keeper.clone()); diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs index 41fd4310d714..d5cf7205e6b1 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/function.rs @@ -1,6 +1,6 @@ pub(self) use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use super::{SealCriterion, SealResolution}; @@ -13,6 +13,10 @@ type CustomSealerFn = dyn Fn( ExecutionMetrics, BlockGasCount, BlockGasCount, + usize, + usize, + DeduplicatedWritesMetrics, + DeduplicatedWritesMetrics, ) -> SealResolution + Send + 'static; @@ -37,6 +41,10 @@ impl SealCriterion for FnCriterion { tx_execution_metrics: ExecutionMetrics, block_gas_count: BlockGasCount, tx_gas_count: BlockGasCount, + block_included_txs_size: usize, + tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { self.0( config, @@ -46,6 +54,10 @@ impl SealCriterion for FnCriterion { tx_execution_metrics, block_gas_count, tx_gas_count, + 
block_included_txs_size, + tx_size, + block_writes_metrics, + tx_writes_metrics, ) } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs index 80e19d8d39a2..e8b9e0389b0b 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/gas.rs @@ -1,7 +1,7 @@ use super::{SealCriterion, SealResolution, StateKeeperConfig}; use crate::gas_tracker::new_block_gas_count; use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; /// This is a temporary solution /// Instead of checking for gas it simply checks that the contracts' @@ -22,6 +22,10 @@ impl SealCriterion for GasCriterion { _tx_execution_metrics: ExecutionMetrics, block_gas_count: BlockGasCount, tx_gas_count: BlockGasCount, + _block_included_txs_size: usize, + _tx_size: usize, + _block_writes_metrics: DeduplicatedWritesMetrics, + _tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { if (tx_gas_count + new_block_gas_count()).has_greater_than( (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() as u32, @@ -64,6 +68,10 @@ mod tests { Default::default(), empty_block_gas, Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); let tx_gas = BlockGasCount { @@ -80,6 +88,10 @@ mod tests { Default::default(), empty_block_gas + tx_gas, tx_gas, + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!( huge_transaction_resolution, @@ -106,6 +118,10 @@ mod tests { Default::default(), empty_block_gas + tx_gas, tx_gas, + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(resolution_after_first_tx, SealResolution::NoSeal); @@ 
-144,6 +160,10 @@ mod tests { Default::default(), block_gas, tx_gas, + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(resolution_after_first_tx, SealResolution::IncludeAndSeal); @@ -167,6 +187,10 @@ mod tests { Default::default(), empty_block_gas + tx_gas + tx_gas, tx_gas, + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(resolution_after_first_tx, SealResolution::ExcludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs index 63a28c8f129b..7b2e3fbba9c5 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/geometry_seal_criteria.rs @@ -2,7 +2,10 @@ use std::fmt::Debug; use vm::MAX_CYCLES_FOR_TX; use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::circuit::GEOMETRY_CONFIG; -use zksync_types::{block::BlockGasCount, tx::ExecutionMetrics}; +use zksync_types::{ + block::BlockGasCount, + tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, +}; // Local uses use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; @@ -21,7 +24,7 @@ pub struct MaxCyclesCriterion; trait MetricExtractor { const PROM_METRIC_CRITERION_NAME: &'static str; fn limit_per_block() -> usize; - fn extract(metric: &ExecutionMetrics) -> usize; + fn extract(metric: &ExecutionMetrics, writes: &DeduplicatedWritesMetrics) -> usize; } impl SealCriterion for T @@ -37,15 +40,21 @@ where tx_execution_metrics: ExecutionMetrics, _block_gas_count: BlockGasCount, _tx_gas_count: BlockGasCount, + _block_included_txs_size: usize, + _tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { - if T::extract(&tx_execution_metrics) + if 
T::extract(&tx_execution_metrics, &tx_writes_metrics) > (T::limit_per_block() as f64 * config.reject_tx_at_geometry_percentage).round() as usize { SealResolution::Unexecutable("ZK proof cannot be generated for a transaction".into()) - } else if T::extract(&block_execution_metrics) >= T::limit_per_block() { + } else if T::extract(&block_execution_metrics, &block_writes_metrics) + >= T::limit_per_block() + { SealResolution::ExcludeAndSeal - } else if T::extract(&block_execution_metrics) + } else if T::extract(&block_execution_metrics, &block_writes_metrics) > (T::limit_per_block() as f64 * config.close_block_at_geometry_percentage).round() as usize { @@ -67,7 +76,7 @@ impl MetricExtractor for BytecodeHashesCriterion { GEOMETRY_CONFIG.limit_for_code_decommitter_sorter as usize } - fn extract(metrics: &ExecutionMetrics) -> usize { + fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { metrics.contracts_used } } @@ -79,8 +88,8 @@ impl MetricExtractor for RepeatedWritesCriterion { GEOMETRY_CONFIG.limit_for_repeated_writes_pubdata_hasher as usize } - fn extract(metrics: &ExecutionMetrics) -> usize { - metrics.repeated_storage_writes + fn extract(_metrics: &ExecutionMetrics, writes: &DeduplicatedWritesMetrics) -> usize { + writes.repeated_storage_writes } } @@ -91,8 +100,8 @@ impl MetricExtractor for InitialWritesCriterion { GEOMETRY_CONFIG.limit_for_initial_writes_pubdata_hasher as usize } - fn extract(metrics: &ExecutionMetrics) -> usize { - metrics.initial_storage_writes + fn extract(_metrics: &ExecutionMetrics, writes: &DeduplicatedWritesMetrics) -> usize { + writes.initial_storage_writes } } @@ -103,7 +112,7 @@ impl MetricExtractor for MaxCyclesCriterion { MAX_CYCLES_FOR_TX as usize } - fn extract(metrics: &ExecutionMetrics) -> usize { + fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { metrics.cycles_used as usize } } @@ -111,6 +120,7 @@ impl MetricExtractor for MaxCyclesCriterion { #[cfg(test)] 
mod tests { use zksync_config::configs::chain::StateKeeperConfig; + use zksync_types::tx::tx_execution_info::DeduplicatedWritesMetrics; use zksync_types::tx::ExecutionMetrics; use crate::state_keeper::seal_criteria::geometry_seal_criteria::MaxCyclesCriterion; @@ -130,6 +140,7 @@ mod tests { fn test_no_seal_block_resolution( block_execution_metrics: ExecutionMetrics, + block_writes_metrics: DeduplicatedWritesMetrics, criterion: &dyn SealCriterion, ) { let config = get_config(); @@ -141,12 +152,17 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + block_writes_metrics, + Default::default(), ); assert_eq!(block_resolution, SealResolution::NoSeal); } fn test_include_and_seal_block_resolution( block_execution_metrics: ExecutionMetrics, + block_writes_metrics: DeduplicatedWritesMetrics, criterion: &dyn SealCriterion, ) { let config = get_config(); @@ -158,12 +174,17 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + block_writes_metrics, + Default::default(), ); assert_eq!(block_resolution, SealResolution::IncludeAndSeal); } fn test_exclude_and_seal_block_resolution( block_execution_metrics: ExecutionMetrics, + block_writes_metrics: DeduplicatedWritesMetrics, criterion: &dyn SealCriterion, ) { let config = get_config(); @@ -175,12 +196,17 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + block_writes_metrics, + Default::default(), ); assert_eq!(block_resolution, SealResolution::ExcludeAndSeal); } fn test_unexecutable_tx_resolution( tx_execution_metrics: ExecutionMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, criterion: &dyn SealCriterion, ) { let config = get_config(); @@ -192,6 +218,10 @@ mod tests { tx_execution_metrics, Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + tx_writes_metrics, ); 
assert_eq!( @@ -200,28 +230,37 @@ mod tests { ); } - macro_rules! test_scenario { + macro_rules! test_scenario_execution_metrics { ($criterion: tt, $metric_name: ident, $metric_type: ty) => { let config = get_config(); + let writes_metrics = DeduplicatedWritesMetrics::default(); let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block() / 2) as $metric_type, ..Default::default() }; - test_no_seal_block_resolution(block_execution_metrics, &$criterion); + test_no_seal_block_resolution(block_execution_metrics, writes_metrics, &$criterion); let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block() - 1) as $metric_type, ..Default::default() }; - test_include_and_seal_block_resolution(block_execution_metrics, &$criterion); + test_include_and_seal_block_resolution( + block_execution_metrics, + writes_metrics, + &$criterion, + ); let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block()) as $metric_type, ..Default::default() }; - test_exclude_and_seal_block_resolution(block_execution_metrics, &$criterion); + test_exclude_and_seal_block_resolution( + block_execution_metrics, + writes_metrics, + &$criterion, + ); let tx_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block() as f64 @@ -231,27 +270,71 @@ mod tests { ..Default::default() }; - test_unexecutable_tx_resolution(tx_execution_metrics, &$criterion); + test_unexecutable_tx_resolution(tx_execution_metrics, writes_metrics, &$criterion); + }; + } + + macro_rules! 
test_scenario_writes_metrics { + ($criterion: tt, $metric_name: ident, $metric_type: ty) => { + let config = get_config(); + let execution_metrics = ExecutionMetrics::default(); + let block_writes_metrics = DeduplicatedWritesMetrics { + $metric_name: ($criterion::limit_per_block() / 2) as $metric_type, + ..Default::default() + }; + test_no_seal_block_resolution(execution_metrics, block_writes_metrics, &$criterion); + + let block_writes_metrics = DeduplicatedWritesMetrics { + $metric_name: ($criterion::limit_per_block() - 1) as $metric_type, + ..Default::default() + }; + + test_include_and_seal_block_resolution( + execution_metrics, + block_writes_metrics, + &$criterion, + ); + + let block_writes_metrics = DeduplicatedWritesMetrics { + $metric_name: ($criterion::limit_per_block()) as $metric_type, + ..Default::default() + }; + + test_exclude_and_seal_block_resolution( + execution_metrics, + block_writes_metrics, + &$criterion, + ); + + let tx_writes_metrics = DeduplicatedWritesMetrics { + $metric_name: ($criterion::limit_per_block() as f64 + * config.reject_tx_at_geometry_percentage + + 1f64) + .round() as $metric_type, + ..Default::default() + }; + + test_unexecutable_tx_resolution(execution_metrics, tx_writes_metrics, &$criterion); }; } #[test] fn bytecode_hashes_criterion() { - test_scenario!(BytecodeHashesCriterion, contracts_used, usize); + test_scenario_execution_metrics!(BytecodeHashesCriterion, contracts_used, usize); } #[test] fn repeated_writes_seal_criterion() { - test_scenario!(RepeatedWritesCriterion, repeated_storage_writes, usize); + test_scenario_writes_metrics!(RepeatedWritesCriterion, repeated_storage_writes, usize); } #[test] fn initial_writes_seal_criterion() { - test_scenario!(InitialWritesCriterion, initial_storage_writes, usize); + test_scenario_writes_metrics!(InitialWritesCriterion, initial_storage_writes, usize); } #[test] fn initial_max_cycles_seal_criterion() { - test_scenario!(MaxCyclesCriterion, cycles_used, u32); + 
test_scenario_execution_metrics!(MaxCyclesCriterion, cycles_used, u32); } } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs index d626db6c780c..f8e0c9b8c5ad 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -12,18 +12,20 @@ use std::fmt::Debug; pub(self) use zksync_config::configs::chain::StateKeeperConfig; +use zksync_contracts::BaseSystemContractsHashes; use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_utils::time::{millis_since, millis_since_epoch}; use super::updates::UpdatesManager; pub(crate) mod function; -mod gas; +pub(crate) mod gas; mod geometry_seal_criteria; mod pubdata_bytes; pub(crate) mod slots; mod timeout; +mod tx_encoding_size; /// Reported decision regarding block sealing. #[derive(Debug, Clone, PartialEq)] @@ -75,7 +77,7 @@ impl SealResolution { } } -pub(crate) trait SealCriterion: Debug + Send + 'static { +pub trait SealCriterion: Debug + Send + 'static { #[allow(clippy::too_many_arguments)] fn should_seal( &self, @@ -86,6 +88,10 @@ pub(crate) trait SealCriterion: Debug + Send + 'static { tx_execution_metrics: ExecutionMetrics, block_gas_count: BlockGasCount, tx_gas_count: BlockGasCount, + block_included_txs_size: usize, + tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution; // We need self here only for rust restrictions for creating an object from trait // https://doc.rust-lang.org/reference/items/traits.html#object-safety @@ -93,9 +99,9 @@ pub(crate) trait SealCriterion: Debug + Send + 'static { } /// Sealer function that returns a boolean. 
-type SealerFn = dyn Fn(&UpdatesManager) -> bool + Send; +pub type SealerFn = dyn Fn(&UpdatesManager) -> bool + Send; -pub(crate) struct SealManager { +pub struct SealManager { config: StateKeeperConfig, /// Primary sealers set that is used to check if batch should be sealed after executing a transaction. sealers: Vec>, @@ -118,7 +124,13 @@ impl SealManager { /// Creates a default pre-configured seal manager. pub(crate) fn new(config: StateKeeperConfig) -> Self { let sealers: Vec> = Self::get_default_sealers(); - let unconditional_sealer = Self::timeout_batch_sealer(config.block_commit_deadline_ms); + let unconditional_sealer = Self::timeout_and_code_hash_batch_sealer( + config.block_commit_deadline_ms, + BaseSystemContractsHashes { + bootloader: config.bootloader_hash, + default_aa: config.default_aa_hash, + }, + ); let miniblock_sealer = Self::timeout_miniblock_sealer(config.miniblock_commit_deadline_ms); Self::custom(config, sealers, unconditional_sealer, miniblock_sealer) @@ -126,7 +138,7 @@ impl SealManager { /// Allows to create a seal manager object from externally-defined sealers. /// Mostly useful for test configuration. - pub(crate) fn custom( + pub fn custom( config: StateKeeperConfig, sealers: Vec>, unconditional_sealer: Box, @@ -141,10 +153,19 @@ impl SealManager { } /// Creates a sealer function that would seal the batch because of the timeout. 
- fn timeout_batch_sealer(block_commit_deadline_ms: u64) -> Box { + pub(crate) fn timeout_and_code_hash_batch_sealer( + block_commit_deadline_ms: u64, + base_system_contracts_hashes: BaseSystemContractsHashes, + ) -> Box { Box::new(move |manager| { - let should_seal = millis_since(manager.batch_timestamp()) > block_commit_deadline_ms; - if should_seal { + // Verify timestamp + let should_seal_timeout = + millis_since(manager.batch_timestamp()) > block_commit_deadline_ms; + // Verify code hashes + let should_seal_code_hashes = + base_system_contracts_hashes != manager.base_system_contract_hashes(); + + if should_seal_timeout { metrics::increment_counter!( "server.tx_aggregation.reason", "criterion" => "no_txs_timeout" @@ -156,14 +177,31 @@ impl SealManager { millis_since_epoch() ); } - should_seal + + if should_seal_code_hashes { + metrics::increment_counter!( + "server.tx_aggregation.reason", + "criterion" => "different_code_hashes" + ); + vlog::info!( + "l1_batch_different_code_hashes_triggered without new txs \n + l1 batch code hashes: {:?} \n + expected code hashes {:?} ", + base_system_contracts_hashes, + manager.base_system_contract_hashes(), + ); + } + + should_seal_timeout || should_seal_code_hashes }) } /// Creates a sealer function that would seal the miniblock because of the timeout. + /// Will only trigger for the non-empty miniblocks. 
fn timeout_miniblock_sealer(miniblock_commit_deadline_ms: u64) -> Box { Box::new(move |manager| { - millis_since(manager.miniblock.timestamp) > miniblock_commit_deadline_ms + !manager.miniblock.executed_transactions.is_empty() + && millis_since(manager.miniblock.timestamp) > miniblock_commit_deadline_ms }) } @@ -177,6 +215,10 @@ impl SealManager { tx_execution_metrics: ExecutionMetrics, block_gas_count: BlockGasCount, tx_gas_count: BlockGasCount, + block_included_txs_size: usize, + tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { let mut final_seal_resolution = SealResolution::NoSeal; for sealer in &self.sealers { @@ -188,6 +230,10 @@ impl SealManager { tx_execution_metrics, block_gas_count, tx_gas_count, + block_included_txs_size, + tx_size, + block_writes_metrics, + tx_writes_metrics, ); match seal_resolution { SealResolution::IncludeAndSeal => { @@ -249,8 +295,12 @@ impl SealManager { } pub(crate) fn should_seal_miniblock(&self, updates_manager: &UpdatesManager) -> bool { - !updates_manager.miniblock.executed_transactions.is_empty() - && (self.miniblock_sealer)(updates_manager) + // Unlike with the L1 batch, we don't check the number of transactions in the miniblock, + // because we might want to seal the miniblock even if it's empty (e.g. on an external node, + // where we have to replicate the state of the main node, including the last (empty) miniblock of the batch). + // The check for the number of transactions is expected to be done, if relevant, in the `miniblock_sealer` + // directly. 
+ (self.miniblock_sealer)(updates_manager) } pub(crate) fn get_default_sealers() -> Vec> { @@ -262,7 +312,100 @@ impl SealManager { Box::new(geometry_seal_criteria::InitialWritesCriterion), Box::new(geometry_seal_criteria::RepeatedWritesCriterion), Box::new(geometry_seal_criteria::MaxCyclesCriterion), + Box::new(tx_encoding_size::TxEncodingSizeCriterion), ]; sealers } } + +#[cfg(test)] +mod tests { + use vm::{ + vm::{VmPartialExecutionResult, VmTxExecutionResult}, + vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, + }; + use zksync_types::{ + l2::L2Tx, + tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}, + Address, Nonce, H256, U256, + }; + use zksync_utils::time::seconds_since_epoch; + + use super::*; + + fn create_manager() -> UpdatesManager { + let block_context = BlockContextMode::NewBlock( + DerivedBlockContext { + context: BlockContext { + block_number: 0, + block_timestamp: 0, + l1_gas_price: 0, + fair_l2_gas_price: 0, + operator_address: Default::default(), + }, + base_fee: 0, + }, + 0.into(), + ); + UpdatesManager::new(&block_context, Default::default()) + } + + fn apply_tx_to_manager(manager: &mut UpdatesManager) { + let mut tx = L2Tx::new( + Default::default(), + Default::default(), + Nonce(0), + Default::default(), + Address::default(), + U256::zero(), + None, + Default::default(), + ); + tx.set_input(H256::random().0.to_vec(), H256::random()); + manager.extend_from_executed_transaction( + &tx.into(), + VmTxExecutionResult { + status: TxExecutionStatus::Success, + result: VmPartialExecutionResult { + logs: VmExecutionLogs::default(), + revert_reason: None, + contracts_used: 0, + cycles_used: 0, + }, + gas_refunded: 0, + operator_suggested_refund: 0, + }, + Default::default(), + Default::default(), + Default::default(), + ); + } + + /// This test mostly exists to make sure that we can't seal empty miniblocks on the main node. 
+ #[test] + fn timeout_miniblock_sealer() { + let timeout_miniblock_sealer = SealManager::timeout_miniblock_sealer(1000); + + let mut manager = create_manager(); + // Empty miniblock should not trigger. + manager.miniblock.timestamp = seconds_since_epoch() - 10; + assert!( + !timeout_miniblock_sealer(&manager), + "Empty miniblock shouldn't be sealed" + ); + + // Non-empty miniblock should trigger. + apply_tx_to_manager(&mut manager); + assert!( + timeout_miniblock_sealer(&manager), + "Non-empty miniblock with old timestamp should be sealed" + ); + + // Check the timestamp logic. + manager.miniblock.timestamp = seconds_since_epoch(); + assert!( + !timeout_miniblock_sealer(&manager), + "Non-empty miniblock with too recent timestamp shouldn't be sealed" + ); + } +} diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs index e690efdb97a5..ec7e067cf48a 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/pubdata_bytes.rs @@ -1,4 +1,4 @@ -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_types::{block::BlockGasCount, MAX_PUBDATA_PER_L1_BATCH}; use super::{SealCriterion, SealResolution, StateKeeperConfig}; @@ -16,11 +16,16 @@ impl SealCriterion for PubDataBytesCriterion { tx_execution_metrics: ExecutionMetrics, _block_gas_count: BlockGasCount, _tx_gas_count: BlockGasCount, + _block_included_txs_size: usize, + _tx_size: usize, + block_writes_metrics: DeduplicatedWritesMetrics, + tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { let max_pubdata_per_l1_batch = MAX_PUBDATA_PER_L1_BATCH as usize; - let block_size = block_execution_metrics.size(); - if tx_execution_metrics.size() + let block_size = block_execution_metrics.size() + block_writes_metrics.size(); + let tx_size = 
tx_execution_metrics.size() + tx_writes_metrics.size(); + if tx_size > (max_pubdata_per_l1_batch as f64 * config.reject_tx_at_eth_params_percentage).round() as usize { @@ -32,7 +37,6 @@ impl SealCriterion for PubDataBytesCriterion { } else if block_size > (max_pubdata_per_l1_batch as f64 * config.close_block_at_eth_params_percentage) .round() as usize - && block_size < max_pubdata_per_l1_batch { SealResolution::IncludeAndSeal } else { @@ -58,8 +62,6 @@ mod tests { let criterion = PubDataBytesCriterion; let block_execution_metrics = ExecutionMetrics { - initial_storage_writes: 0, - repeated_storage_writes: 0, contracts_deployed: 0, contracts_used: 0, gas_used: 0, @@ -83,12 +85,14 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); let block_execution_metrics = ExecutionMetrics { - initial_storage_writes: 0, - repeated_storage_writes: 0, contracts_deployed: 0, contracts_used: 0, gas_used: 0, @@ -112,12 +116,14 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); let block_execution_metrics = ExecutionMetrics { - initial_storage_writes: 0, - repeated_storage_writes: 0, contracts_deployed: 0, contracts_used: 0, gas_used: 0, @@ -137,6 +143,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(full_block_resolution, SealResolution::ExcludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs index eb9bb000479a..b0593260d17d 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs +++ 
b/core/bin/zksync_core/src/state_keeper/seal_criteria/slots.rs @@ -1,6 +1,6 @@ use super::{SealCriterion, SealResolution, StateKeeperConfig}; use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; /// Checks whether we should seal the block because we've run out of transaction slots. #[derive(Debug)] @@ -16,6 +16,10 @@ impl SealCriterion for SlotsCriterion { _tx_execution_metrics: ExecutionMetrics, _block_gas_count: BlockGasCount, _tx_gas_count: BlockGasCount, + _block_included_txs_size: usize, + _tx_size: usize, + _block_writes_metrics: DeduplicatedWritesMetrics, + _tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { if tx_count >= config.transaction_slots { SealResolution::IncludeAndSeal @@ -48,6 +52,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(almost_full_block_resolution, SealResolution::NoSeal); @@ -59,6 +67,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs index 59e537428aa5..db2fa33efd7e 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/timeout.rs @@ -1,5 +1,5 @@ use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; use zksync_utils::time::millis_since_epoch; use super::{SealCriterion, SealResolution, StateKeeperConfig}; @@ -18,6 +18,10 @@ impl SealCriterion for TimeoutCriterion 
{ _tx_execution_metrics: ExecutionMetrics, _block_gas_count: BlockGasCount, _tx_gas_count: BlockGasCount, + _block_included_txs_size: usize, + _tx_size: usize, + _block_writes_metrics: DeduplicatedWritesMetrics, + _tx_writes_metrics: DeduplicatedWritesMetrics, ) -> SealResolution { if tx_count == 0 { return SealResolution::NoSeal; @@ -62,6 +66,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); @@ -74,6 +82,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(no_timeout_resolution, SealResolution::NoSeal); @@ -85,6 +97,10 @@ mod tests { Default::default(), Default::default(), Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), ); assert_eq!(timeout_resolution, SealResolution::IncludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs new file mode 100644 index 000000000000..d4e200a2206b --- /dev/null +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/tx_encoding_size.rs @@ -0,0 +1,126 @@ +use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; +use zksync_types::block::BlockGasCount; +use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; + +use super::{SealCriterion, SealResolution, StateKeeperConfig}; + +#[derive(Debug)] +pub struct TxEncodingSizeCriterion; + +impl SealCriterion for TxEncodingSizeCriterion { + fn should_seal( + &self, + config: &StateKeeperConfig, + _block_open_timestamp_ms: u128, + _tx_count: usize, + _block_execution_metrics: ExecutionMetrics, + _tx_execution_metrics: ExecutionMetrics, + _block_gas_count: BlockGasCount, + 
_tx_gas_count: BlockGasCount, + block_included_txs_size: usize, + tx_size: usize, + _block_writes_metrics: DeduplicatedWritesMetrics, + _tx_writes_metrics: DeduplicatedWritesMetrics, + ) -> SealResolution { + if tx_size + > (BOOTLOADER_TX_ENCODING_SPACE as f64 * config.reject_tx_at_geometry_percentage) + .round() as usize + { + SealResolution::Unexecutable( + "Transaction cannot be included due to large encoding size".into(), + ) + } else if block_included_txs_size > BOOTLOADER_TX_ENCODING_SPACE as usize { + SealResolution::ExcludeAndSeal + } else if block_included_txs_size + > (BOOTLOADER_TX_ENCODING_SPACE as f64 * config.close_block_at_geometry_percentage) + .round() as usize + { + SealResolution::IncludeAndSeal + } else { + SealResolution::NoSeal + } + } + + fn prom_criterion_name(&self) -> &'static str { + "tx_encoding_size" + } +} + +#[cfg(test)] +mod tests { + use super::{ + SealCriterion, SealResolution, TxEncodingSizeCriterion, BOOTLOADER_TX_ENCODING_SPACE, + }; + use zksync_config::ZkSyncConfig; + + #[test] + fn seal_criterion() { + let config = ZkSyncConfig::from_env().chain.state_keeper; + let criterion = TxEncodingSizeCriterion; + + let empty_block_resolution = criterion.should_seal( + &config, + Default::default(), + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); + assert_eq!(empty_block_resolution, SealResolution::NoSeal); + + let unexecutable_resolution = criterion.should_seal( + &config, + Default::default(), + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + BOOTLOADER_TX_ENCODING_SPACE as usize + 1, + Default::default(), + Default::default(), + ); + assert_eq!( + unexecutable_resolution, + SealResolution::Unexecutable( + "Transaction cannot be included due to large encoding size".into() + ) + ); + + let exclude_and_seal_resolution = 
criterion.should_seal( + &config, + Default::default(), + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + BOOTLOADER_TX_ENCODING_SPACE as usize + 1, + 1, + Default::default(), + Default::default(), + ); + assert_eq!(exclude_and_seal_resolution, SealResolution::ExcludeAndSeal); + + let include_and_seal_resolution = criterion.should_seal( + &config, + Default::default(), + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + BOOTLOADER_TX_ENCODING_SPACE as usize, + 1, + Default::default(), + Default::default(), + ); + assert_eq!(include_and_seal_resolution, SealResolution::IncludeAndSeal); + } +} diff --git a/core/bin/zksync_core/src/state_keeper/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/tests/mod.rs index ea3bd16511a9..f0a46423e7fc 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/mod.rs @@ -1,15 +1,193 @@ +use std::{ + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Instant, +}; + +use crate::gas_tracker::constants::{ + BLOCK_COMMIT_BASE_COST, BLOCK_EXECUTE_BASE_COST, BLOCK_PROVE_BASE_COST, +}; +use once_cell::sync::Lazy; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_types::MiniblockNumber; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_types::{ + block::BlockGasCount, zk_evm::block_properties::BlockProperties, MiniblockNumber, +}; +use zksync_utils::{h256_to_u256, time::millis_since_epoch}; -use crate::state_keeper::seal_criteria::{slots::SlotsCriterion, SealManager}; +use crate::state_keeper::{ + seal_criteria::{gas::GasCriterion, slots::SlotsCriterion, SealManager}, + types::ExecutionMetricsForCriteria, +}; use self::tester::{ bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec, TestScenario, }; +use super::keeper::POLL_WAIT_DURATION; 
+ mod tester; +pub static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub fn default_block_properties() -> BlockProperties { + BlockProperties { + default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + } +} + +#[test] +fn sealed_by_number_of_txs() { + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(SlotsCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + ); + + let scenario = TestScenario::new(); + + scenario + .next_tx("First tx", random_tx(1), successful_exec()) + .miniblock_sealed("Miniblock 1") + .next_tx("Second tx", random_tx(2), successful_exec()) + .miniblock_sealed("Miniblock 2") + .batch_sealed("Batch 1") + .run(sealer); +} + +#[test] +fn sealed_by_gas() { + let config = StateKeeperConfig { + max_single_tx_gas: 62_002, + reject_tx_at_gas_percentage: 1.0, + close_block_at_gas_percentage: 0.5, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(GasCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + ); + + let mut execution_result = successful_exec(); + let l1_gas_per_tx = BlockGasCount { + commit: 1, // Both txs together with block_base_cost would bring it over the block 31_001 commit bound. 
+ prove: 0, + execute: 0, + }; + execution_result.add_tx_metrics(ExecutionMetricsForCriteria { + l1_gas: l1_gas_per_tx, + execution_metrics: Default::default(), + }); + + TestScenario::new() + .next_tx("First tx", random_tx(1), execution_result.clone()) + .miniblock_sealed_with("Miniblock with a single tx", move |updates| { + assert_eq!( + updates.miniblock.l1_gas_count, + l1_gas_per_tx, + "L1 gas used by a miniblock should consist of the gas used by its txs" + ); + }) + .next_tx("Second tx", random_tx(1), execution_result) + .miniblock_sealed("Miniblock 2") + .batch_sealed_with("Batch sealed with both txs", |_, updates, _| { + assert_eq!( + updates.l1_batch.l1_gas_count, + BlockGasCount { + commit: BLOCK_COMMIT_BASE_COST + 2, + prove: BLOCK_PROVE_BASE_COST, + execute: BLOCK_EXECUTE_BASE_COST, + }, + "L1 gas used by a batch should consists of gas used by its txs + basic block gas cost" + ); + }) + .run(sealer); +} + +#[test] +fn sealed_by_gas_then_by_num_tx() { + let config = StateKeeperConfig { + max_single_tx_gas: 62_000, + reject_tx_at_gas_percentage: 1.0, + close_block_at_gas_percentage: 0.5, + transaction_slots: 3, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(GasCriterion), Box::new(SlotsCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + ); + + let mut execution_result = successful_exec(); + execution_result.add_tx_metrics(ExecutionMetricsForCriteria { + l1_gas: BlockGasCount { + commit: 1, + prove: 0, + execute: 0, + }, + execution_metrics: Default::default(), + }); + + // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer. 
+ TestScenario::new() + .next_tx("First tx", random_tx(1), execution_result) + .miniblock_sealed("Miniblock 1") + .batch_sealed("Batch 1") + .next_tx("Second tx", random_tx(2), successful_exec()) + .miniblock_sealed("Miniblock 2") + .next_tx("Third tx", random_tx(3), successful_exec()) + .miniblock_sealed("Miniblock 3") + .next_tx("Fourth tx", random_tx(4), successful_exec()) + .miniblock_sealed("Miniblock 4") + .batch_sealed("Batch 2") + .run(sealer); +} + +#[test] +fn batch_sealed_before_miniblock_does() { + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(SlotsCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 3), + ); + + let scenario = TestScenario::new(); + + // Miniblock sealer will not return true before the batch is sealed because the batch only has 2 txs. + scenario + .next_tx("First tx", random_tx(1), successful_exec()) + .next_tx("Second tx", random_tx(2), successful_exec()) + .miniblock_sealed_with("Miniblock with two txs", |updates| { + assert_eq!( + updates.miniblock.executed_transactions.len(), + 2, + "The miniblock should have 2 txs" + ); + }) + .batch_sealed("Batch 1") + .run(sealer); +} + #[test] fn basic_flow() { let config = StateKeeperConfig { @@ -98,6 +276,49 @@ fn bootloader_tip_out_of_gas_flow() { .run(sealer); } +#[test] +fn bootloader_config_has_been_updated() { + let config = StateKeeperConfig { + transaction_slots: 300, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![], + SealManager::timeout_and_code_hash_batch_sealer( + u64::MAX, + BaseSystemContractsHashes { + bootloader: Default::default(), + default_aa: Default::default(), + }, + ), + Box::new(|_| false), + ); + + let pending_batch = + pending_batch_data(vec![(MiniblockNumber(1), vec![random_tx(1), random_tx(2)])]); + + TestScenario::new() + .load_pending_batch(pending_batch) + 
.batch_sealed_with("Batch sealed with all 2 tx", |_, updates, _| { + assert_eq!( + updates.l1_batch.executed_transactions.len(), + 2, + "There should be 2 transactions in the batch" + ); + }) + .next_tx("Final tx of batch", random_tx(3), successful_exec()) + .miniblock_sealed("Miniblock with this tx sealed") + .batch_sealed_with("Batch sealed with all 1 tx", |_, updates, _| { + assert_eq!( + updates.l1_batch.executed_transactions.len(), + 1, + "There should be 1 transactions in the batch" + ); + }) + .run(sealer); +} + #[test] fn pending_batch_is_applied() { let config = StateKeeperConfig { @@ -116,6 +337,7 @@ fn pending_batch_is_applied() { (MiniblockNumber(2), vec![random_tx(2)]), ]); + // We configured state keeper to use different system contract hashes, so it must seal the pending batch immediately. TestScenario::new() .load_pending_batch(pending_batch) .next_tx("Final tx of batch", random_tx(3), successful_exec()) @@ -135,3 +357,141 @@ fn pending_batch_is_applied() { }) .run(sealer); } + +/// Unconditionally seal the batch without triggering specific criteria. +#[test] +fn unconditional_sealing() { + // Trigger to know when to seal the batch. + // Once miniblock with one tx would be sealed, trigger would allow batch to be sealed as well. + let batch_seal_trigger = Arc::new(AtomicBool::new(false)); + let batch_seal_trigger_checker = batch_seal_trigger.clone(); + let start = Instant::now(); + let seal_miniblock_after = POLL_WAIT_DURATION; // Seal after 2 state keeper polling duration intervals. 
+ + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(SlotsCriterion)], + Box::new(move |_| batch_seal_trigger_checker.load(Ordering::Relaxed)), + Box::new(move |upd_manager| { + if upd_manager.pending_executed_transactions_len() != 0 + && start.elapsed() >= seal_miniblock_after + { + batch_seal_trigger.store(true, Ordering::Relaxed); + true + } else { + false + } + }), + ); + + TestScenario::new() + .next_tx("The only tx", random_tx(1), successful_exec()) + .no_txs_until_next_action("We don't give transaction to wait for miniblock to be sealed") + .miniblock_sealed("Miniblock is sealed with just one tx") + .no_txs_until_next_action("Still no tx") + .batch_sealed("Batch is sealed with just one tx") + .run(sealer); +} + +/// Checks the next miniblock sealed after pending batch has a correct timestamp +#[test] +fn miniblock_timestamp_after_pending_batch() { + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(SlotsCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + ); + + let pending_batch = pending_batch_data(vec![(MiniblockNumber(1), vec![random_tx(1)])]); + + let current_timestamp = (millis_since_epoch() / 1000) as u64; + + TestScenario::new() + .load_pending_batch(pending_batch) + .next_tx( + "First tx after pending batch", + random_tx(2), + successful_exec(), + ) + .miniblock_sealed_with("Miniblock with a single tx", move |updates| { + assert!( + updates.miniblock.timestamp >= current_timestamp, + "Timestamp cannot decrease" + ); + }) + .batch_sealed("Batch is sealed with two transactions") + .run(sealer); +} + +/// Makes sure that the timestamp doesn't decrease in consequent miniblocks. 
+/// +/// Timestamps are faked in the IO layer, so this test mostly makes sure that the state keeper doesn't substitute +/// any unexpected value on its own. +#[test] +fn time_is_monotonic() { + let timestamp_first_miniblock = Arc::new(AtomicU64::new(0u64)); // Time is faked in tests. + let timestamp_second_miniblock = timestamp_first_miniblock.clone(); + let timestamp_third_miniblock = timestamp_first_miniblock.clone(); + + let config = StateKeeperConfig { + transaction_slots: 2, + ..Default::default() + }; + let sealer = SealManager::custom( + config, + vec![Box::new(SlotsCriterion)], + Box::new(|_| false), + Box::new(|updates| updates.miniblock.executed_transactions.len() == 1), + ); + + let scenario = TestScenario::new(); + + scenario + .next_tx("First tx", random_tx(1), successful_exec()) + .miniblock_sealed_with("Miniblock 1", move |updates| { + let min_expected = timestamp_first_miniblock.load(Ordering::Relaxed); + let actual = updates.miniblock.timestamp; + assert!( + actual > min_expected, + "First miniblock: Timestamp cannot decrease. Expected at least {}, got {}", + min_expected, + actual + ); + timestamp_first_miniblock.store(updates.miniblock.timestamp, Ordering::Relaxed); + }) + .next_tx("Second tx", random_tx(2), successful_exec()) + .miniblock_sealed_with("Miniblock 2", move |updates| { + let min_expected = timestamp_second_miniblock.load(Ordering::Relaxed); + let actual = updates.miniblock.timestamp; + assert!( + actual > min_expected, + "Second miniblock: Timestamp cannot decrease. Expected at least {}, got {}", + min_expected, + actual + ); + timestamp_second_miniblock.store(updates.miniblock.timestamp, Ordering::Relaxed); + }) + .batch_sealed_with("Batch 1", move |_, updates, _| { + // Timestamp from the currently stored miniblock would be used in the fictive miniblock. + // It should be correct as well. 
+ let min_expected = timestamp_third_miniblock.load(Ordering::Relaxed); + let actual = updates.miniblock.timestamp; + assert!( + actual > min_expected, + "Fictive miniblock: Timestamp cannot decrease. Expected at least {}, got {}", + min_expected, + actual + ); + timestamp_third_miniblock.store(updates.miniblock.timestamp, Ordering::Relaxed); + }) + .run(sealer); +} diff --git a/core/bin/zksync_core/src/state_keeper/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/tests/tester.rs index faed1f947c47..b9c4584e6898 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/tester.rs @@ -9,10 +9,8 @@ use assert_matches::assert_matches; use tokio::sync::watch; use vm::{ - utils::default_block_properties, vm::{VmPartialExecutionResult, VmTxExecutionResult}, vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - zk_evm::block_properties::BlockProperties, VmBlockResult, VmExecutionResult, }; use zksync_types::{ @@ -22,8 +20,9 @@ use zksync_types::{ use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, - io::{PendingBatchData, StateKeeperIO}, + io::{L1BatchParams, PendingBatchData, StateKeeperIO}, seal_criteria::SealManager, + tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, types::ExecutionMetricsForCriteria, updates::UpdatesManager, ZkSyncStateKeeper, @@ -65,6 +64,13 @@ impl TestScenario { self } + /// Configures scenario to repeatedly return `None` to tx requests until the next action from the scenario happens. + pub(crate) fn no_txs_until_next_action(mut self, description: &'static str) -> Self { + self.actions + .push_back(ScenarioItem::NoTxsUntilNextAction(description)); + self + } + /// Expect the state keeper to request a transaction from IO. /// Adds both a transaction and an outcome of this transaction (that would be returned to the state keeper from the /// batch executor). 
@@ -209,20 +215,21 @@ fn partial_execution_result() -> VmPartialExecutionResult { /// Creates a `TxExecutionResult` object denoting a successful tx execution. pub(crate) fn successful_exec() -> TxExecutionResult { - let mut result = TxExecutionResult::new(Ok(VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - gas_refunded: 0, - operator_suggested_refund: 0, - })); + let mut result = TxExecutionResult::new(Ok(( + VmTxExecutionResult { + status: TxExecutionStatus::Success, + result: partial_execution_result(), + gas_refunded: 0, + operator_suggested_refund: 0, + }, + vec![], + ))); result.add_tx_metrics(ExecutionMetricsForCriteria { - storage_updates: Default::default(), l1_gas: Default::default(), execution_metrics: Default::default(), }); result.add_bootloader_result(Ok(partial_execution_result())); result.add_bootloader_metrics(ExecutionMetricsForCriteria { - storage_updates: Default::default(), l1_gas: Default::default(), execution_metrics: Default::default(), }); @@ -237,14 +244,16 @@ pub(crate) fn rejected_exec() -> TxExecutionResult { /// Creates a `TxExecutionResult` object denoting a transaction that was executed, but caused a bootloader tip out of /// gas error. 
pub(crate) fn bootloader_tip_out_of_gas() -> TxExecutionResult { - let mut result = TxExecutionResult::new(Ok(VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - gas_refunded: 0, - operator_suggested_refund: 0, - })); + let mut result = TxExecutionResult::new(Ok(( + VmTxExecutionResult { + status: TxExecutionStatus::Success, + result: partial_execution_result(), + gas_refunded: 0, + operator_suggested_refund: 0, + }, + vec![], + ))); result.add_tx_metrics(ExecutionMetricsForCriteria { - storage_updates: Default::default(), l1_gas: Default::default(), execution_metrics: Default::default(), }); @@ -270,16 +279,19 @@ pub(crate) fn pending_batch_data( base_fee: 1, }; - let params = ( - BlockContextMode::NewBlock(derived_context, Default::default()), - block_properties, - ); + let params = L1BatchParams { + context_mode: BlockContextMode::NewBlock(derived_context, Default::default()), + properties: block_properties, + base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + }; PendingBatchData { params, txs } } #[allow(clippy::type_complexity, clippy::large_enum_variant)] // It's OK for tests. enum ScenarioItem { + /// Configures scenraio to repeatedly return `None` to tx requests until the next action from the scenario happens. 
+ NoTxsUntilNextAction(&'static str), Tx(&'static str, Transaction, TxExecutionResult), Rollback(&'static str, Transaction), Reject(&'static str, Transaction, Option), @@ -296,6 +308,9 @@ enum ScenarioItem { impl std::fmt::Debug for ScenarioItem { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { + Self::NoTxsUntilNextAction(descr) => { + f.debug_tuple("NoTxsUntilNextAction").field(descr).finish() + } Self::Tx(descr, tx, result) => f .debug_tuple("Tx") .field(descr) @@ -382,11 +397,7 @@ impl TestBatchExecutorBuilder { } impl L1BatchExecutorBuilder for TestBatchExecutorBuilder { - fn init_batch( - &self, - _block_context: BlockContextMode, - _block_properties: BlockProperties, - ) -> BatchExecutorHandle { + fn init_batch(&self, _l1batch_params: L1BatchParams) -> BatchExecutorHandle { let (commands_sender, commands_receiver) = mpsc::channel(); let executor = TestBatchExecutor::new( @@ -502,6 +513,9 @@ pub(crate) struct TestIO { miniblock_number: MiniblockNumber, fee_account: Address, scenario: TestScenario, + /// Internal flag that is being set if scenario was configured to return `None` to all the transaction + /// requests until some other action happens. + skipping_txs: bool, } impl TestIO { @@ -515,6 +529,7 @@ impl TestIO { miniblock_number: MiniblockNumber(1), fee_account: FEE_ACCOUNT, scenario, + skipping_txs: false, } } @@ -527,6 +542,12 @@ impl TestIO { } let action = self.scenario.actions.pop_front().unwrap(); + if matches!(action, ScenarioItem::NoTxsUntilNextAction(_)) { + self.skipping_txs = true; + // This is a mock item, so pop an actual one for the IO to process. + return self.pop_next_item(request); + } + // If that was a last action, tell the state keeper to stop after that. 
if self.scenario.actions.is_empty() { self.stop_sender.send(true).unwrap(); @@ -548,10 +569,7 @@ impl StateKeeperIO for TestIO { self.scenario.pending_batch.take() } - fn wait_for_new_batch_params( - &mut self, - _max_wait: Duration, - ) -> Option<(BlockContextMode, BlockProperties)> { + fn wait_for_new_batch_params(&mut self, _max_wait: Duration) -> Option { let block_properties = default_block_properties(); let previous_block_hash = U256::zero(); @@ -567,14 +585,30 @@ impl StateKeeperIO for TestIO { base_fee: 1, }; - Some(( - BlockContextMode::NewBlock(derived_context, previous_block_hash), - block_properties, - )) + Some(L1BatchParams { + context_mode: BlockContextMode::NewBlock(derived_context, previous_block_hash), + properties: block_properties, + base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), + }) + } + + fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option { + Some(self.timestamp) } - fn wait_for_next_tx(&mut self, _max_wait: Duration) -> Option { + fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { let action = self.pop_next_item("wait_for_next_tx"); + + // Check whether we should ignore tx requests. + if self.skipping_txs { + // As per expectation, we should provide a delay given by the state keeper. + std::thread::sleep(max_wait); + // Return the action to the scenario as we don't use it. + self.scenario.actions.push_front(action); + return None; + } + + // We shouldn't, process normally. 
assert_matches!( action, ScenarioItem::Tx(_, _, _), @@ -596,6 +630,7 @@ impl StateKeeperIO for TestIO { tx, &expected_tx, "Incorrect transaction has been rolled back" ); + self.skipping_txs = false; } fn reject(&mut self, tx: &Transaction, error: &str) { @@ -615,9 +650,10 @@ impl StateKeeperIO for TestIO { error ); } + self.skipping_txs = false; } - fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) -> u64 { + fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { let action = self.pop_next_item("seal_miniblock"); assert_matches!( action, @@ -630,7 +666,7 @@ impl StateKeeperIO for TestIO { } self.miniblock_number += 1; self.timestamp += 1; - self.timestamp + self.skipping_txs = false; } fn seal_l1_batch( @@ -653,5 +689,6 @@ impl StateKeeperIO for TestIO { self.miniblock_number += 1; // Seal the fictive miniblock. self.batch_number += 1; self.timestamp += 1; + self.skipping_txs = false; } } diff --git a/core/bin/zksync_core/src/state_keeper/types.rs b/core/bin/zksync_core/src/state_keeper/types.rs index c0e84497d2d2..f7b1b2424b66 100644 --- a/core/bin/zksync_core/src/state_keeper/types.rs +++ b/core/bin/zksync_core/src/state_keeper/types.rs @@ -45,7 +45,6 @@ impl MempoolGuard { #[derive(Debug, Clone, Copy, PartialEq)] pub struct ExecutionMetricsForCriteria { - pub storage_updates: usize, pub l1_gas: BlockGasCount, pub execution_metrics: ExecutionMetrics, } diff --git a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs index d16b56f337ae..50ba9c8efcfb 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs @@ -6,15 +6,13 @@ use zksync_types::tx::tx_execution_info::ExecutionMetrics; use zksync_types::{tx::TransactionExecutionResult, ExecuteTransactionCommon}; #[derive(Debug, Clone, PartialEq)] -pub(crate) struct L1BatchUpdates { +pub struct L1BatchUpdates { pub 
executed_transactions: Vec, pub priority_ops_onchain_data: Vec, pub block_execution_metrics: ExecutionMetrics, // how much L1 gas will it take to submit this block? pub l1_gas_count: BlockGasCount, - // We keep track on the number of modified storage keys to close the block by L1 gas - // Later on, we'll replace it with closing L2 blocks by gas. - pub modified_storage_keys_number: usize, + pub txs_encoding_size: usize, } impl L1BatchUpdates { @@ -24,7 +22,7 @@ impl L1BatchUpdates { priority_ops_onchain_data: Default::default(), block_execution_metrics: Default::default(), l1_gas_count: new_block_gas_count(), - modified_storage_keys_number: 0, + txs_encoding_size: 0, } } @@ -38,9 +36,9 @@ impl L1BatchUpdates { self.executed_transactions .extend(miniblock_updates.executed_transactions); - self.modified_storage_keys_number += miniblock_updates.modified_storage_keys_number; self.l1_gas_count += miniblock_updates.l1_gas_count; self.block_execution_metrics += miniblock_updates.block_execution_metrics; + self.txs_encoding_size += miniblock_updates.txs_encoding_size; } } @@ -48,9 +46,10 @@ impl L1BatchUpdates { mod tests { use super::*; use crate::gas_tracker::new_block_gas_count; + use vm::transaction_data::TransactionData; use vm::vm::{VmPartialExecutionResult, VmTxExecutionResult}; use zksync_types::tx::tx_execution_info::TxExecutionStatus; - use zksync_types::{l2::L2Tx, Address, Nonce, H256, U256}; + use zksync_types::{l2::L2Tx, Address, Nonce, Transaction, H256, U256}; #[test] fn apply_miniblock_with_empty_tx() { @@ -67,9 +66,10 @@ mod tests { ); tx.set_input(H256::random().0.to_vec(), H256::random()); + let tx: Transaction = tx.into(); miniblock_accumulator.extend_from_executed_transaction( - &tx.into(), + &tx, VmTxExecutionResult { status: TxExecutionStatus::Success, result: VmPartialExecutionResult { @@ -83,6 +83,7 @@ mod tests { }, Default::default(), Default::default(), + Default::default(), ); let mut l1_batch_accumulator = L1BatchUpdates::new(); @@ -90,20 
+91,13 @@ mod tests { assert_eq!(l1_batch_accumulator.executed_transactions.len(), 1); assert_eq!(l1_batch_accumulator.l1_gas_count, new_block_gas_count()); - assert_eq!(l1_batch_accumulator.modified_storage_keys_number, 0); assert_eq!(l1_batch_accumulator.priority_ops_onchain_data.len(), 0); assert_eq!(l1_batch_accumulator.block_execution_metrics.l2_l1_logs, 0); + + let tx_data: TransactionData = tx.into(); assert_eq!( - l1_batch_accumulator - .block_execution_metrics - .initial_storage_writes, - 0 - ); - assert_eq!( - l1_batch_accumulator - .block_execution_metrics - .repeated_storage_writes, - 0 + l1_batch_accumulator.txs_encoding_size, + tx_data.into_tokens().len() ); } } diff --git a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs index 17a17651c179..042af683c221 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use vm::transaction_data::TransactionData; use vm::vm::VmTxExecutionResult; use zksync_types::block::BlockGasCount; use zksync_types::event::extract_bytecodes_marked_as_known; @@ -6,10 +7,10 @@ use zksync_types::l2_to_l1_log::L2ToL1Log; use zksync_types::tx::tx_execution_info::VmExecutionLogs; use zksync_types::tx::ExecutionMetrics; use zksync_types::{tx::TransactionExecutionResult, StorageLogQuery, Transaction, VmEvent, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; #[derive(Debug, Clone, PartialEq)] -pub(crate) struct MiniblockUpdates { +pub struct MiniblockUpdates { pub executed_transactions: Vec, pub events: Vec, pub storage_logs: Vec, @@ -18,9 +19,7 @@ pub(crate) struct MiniblockUpdates { // how much L1 gas will it take to submit this block? 
pub l1_gas_count: BlockGasCount, pub block_execution_metrics: ExecutionMetrics, - // We keep track on the number of modified storage keys to close the block by L1 gas - // Later on, we'll replace it with closing L2 blocks by gas. - pub modified_storage_keys_number: usize, + pub txs_encoding_size: usize, pub timestamp: u64, } @@ -35,7 +34,7 @@ impl MiniblockUpdates { new_factory_deps: Default::default(), l1_gas_count: Default::default(), block_execution_metrics: Default::default(), - modified_storage_keys_number: 0, + txs_encoding_size: 0, timestamp, } } @@ -52,6 +51,7 @@ impl MiniblockUpdates { tx_execution_result: VmTxExecutionResult, tx_l1_gas_this_tx: BlockGasCount, execution_metrics: ExecutionMetrics, + compressed_bytecodes: Vec, ) { // Get bytecode hashes that were marked as known let saved_factory_deps = @@ -90,6 +90,7 @@ impl MiniblockUpdates { execution_status: tx_execution_result.status, refunded_gas: tx_execution_result.gas_refunded, operator_suggested_refund: tx_execution_result.operator_suggested_refund, + compressed_bytecodes, }); self.events.extend(tx_execution_result.result.logs.events); @@ -98,9 +99,11 @@ impl MiniblockUpdates { self.l2_to_l1_logs .extend(tx_execution_result.result.logs.l2_to_l1_logs); - self.modified_storage_keys_number += execution_metrics.storage_writes(); self.l1_gas_count += tx_l1_gas_this_tx; self.block_execution_metrics += execution_metrics; + + let tx_data: TransactionData = tx.clone().into(); + self.txs_encoding_size += tx_data.into_tokens().len(); } } @@ -126,9 +129,10 @@ mod tests { ); tx.set_input(H256::random().0.to_vec(), H256::random()); + let tx: Transaction = tx.into(); accumulator.extend_from_executed_transaction( - &tx.into(), + &tx, VmTxExecutionResult { status: TxExecutionStatus::Success, result: VmPartialExecutionResult { @@ -142,6 +146,7 @@ mod tests { }, Default::default(), Default::default(), + Default::default(), ); assert_eq!(accumulator.executed_transactions.len(), 1); @@ -149,16 +154,10 @@ mod tests { 
assert_eq!(accumulator.storage_logs.len(), 0); assert_eq!(accumulator.l2_to_l1_logs.len(), 0); assert_eq!(accumulator.l1_gas_count, Default::default()); - assert_eq!(accumulator.modified_storage_keys_number, 0); assert_eq!(accumulator.new_factory_deps.len(), 0); - assert_eq!( - accumulator.block_execution_metrics.initial_storage_writes, - 0 - ); - assert_eq!( - accumulator.block_execution_metrics.repeated_storage_writes, - 0 - ); assert_eq!(accumulator.block_execution_metrics.l2_l1_logs, 0); + + let tx_data: TransactionData = tx.into(); + assert_eq!(accumulator.txs_encoding_size, tx_data.into_tokens().len()); } } diff --git a/core/bin/zksync_core/src/state_keeper/updates/mod.rs b/core/bin/zksync_core/src/state_keeper/updates/mod.rs index 6287a9af6f2d..75cb99a178f9 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/mod.rs @@ -1,10 +1,15 @@ use vm::{vm::VmTxExecutionResult, vm_with_bootloader::BlockContextMode}; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::ExecutionMetrics; -use zksync_types::Transaction; - -mod l1_batch_updates; -mod miniblock_updates; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_types::{ + block::BlockGasCount, + storage_writes_deduplicator::StorageWritesDeduplicator, + tx::tx_execution_info::{ExecutionMetrics, VmExecutionLogs}, + Transaction, +}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +pub mod l1_batch_updates; +pub mod miniblock_updates; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, miniblock_updates::MiniblockUpdates}; @@ -15,17 +20,22 @@ pub(crate) use self::{l1_batch_updates::L1BatchUpdates, miniblock_updates::Minib /// `UpdatesManager` manages the state of both of these accumulators to be consistent /// and provides information about the pending state of the current L1 batch. 
#[derive(Debug, Clone, PartialEq)] -pub(crate) struct UpdatesManager { +pub struct UpdatesManager { batch_timestamp: u64, l1_gas_price: u64, fair_l2_gas_price: u64, base_fee_per_gas: u64, + base_system_contract_hashes: BaseSystemContractsHashes, pub l1_batch: L1BatchUpdates, pub miniblock: MiniblockUpdates, + pub storage_writes_deduplicator: StorageWritesDeduplicator, } impl UpdatesManager { - pub(crate) fn new(block_context: &BlockContextMode) -> Self { + pub(crate) fn new( + block_context: &BlockContextMode, + base_system_contract_hashes: BaseSystemContractsHashes, + ) -> Self { let batch_timestamp = block_context.timestamp(); let context = block_context.inner_block_context().context; Self { @@ -33,8 +43,10 @@ impl UpdatesManager { l1_gas_price: context.l1_gas_price, fair_l2_gas_price: context.fair_l2_gas_price, base_fee_per_gas: block_context.inner_block_context().base_fee, + base_system_contract_hashes, l1_batch: L1BatchUpdates::new(), miniblock: MiniblockUpdates::new(batch_timestamp), + storage_writes_deduplicator: StorageWritesDeduplicator::new(), } } @@ -42,6 +54,10 @@ impl UpdatesManager { self.batch_timestamp } + pub(crate) fn base_system_contract_hashes(&self) -> BaseSystemContractsHashes { + self.base_system_contract_hashes + } + pub(crate) fn l1_gas_price(&self) -> u64 { self.l1_gas_price } @@ -58,15 +74,26 @@ impl UpdatesManager { &mut self, tx: &Transaction, tx_execution_result: VmTxExecutionResult, + compressed_bytecodes: Vec, tx_l1_gas_this_tx: BlockGasCount, execution_metrics: ExecutionMetrics, ) { + self.storage_writes_deduplicator + .apply(&tx_execution_result.result.logs.storage_logs); self.miniblock.extend_from_executed_transaction( tx, tx_execution_result, tx_l1_gas_this_tx, execution_metrics, - ) + compressed_bytecodes, + ); + } + + pub(crate) fn extend_from_fictive_transaction(&mut self, vm_execution_logs: VmExecutionLogs) { + self.storage_writes_deduplicator + .apply(&vm_execution_logs.storage_logs); + self.miniblock + 
.extend_from_fictive_transaction(vm_execution_logs); } pub(crate) fn seal_miniblock(&mut self, new_miniblock_timestamp: u64) { @@ -89,6 +116,10 @@ impl UpdatesManager { self.l1_batch.block_execution_metrics + self.miniblock.block_execution_metrics } + pub(crate) fn pending_txs_encoding_size(&self) -> usize { + self.l1_batch.txs_encoding_size + self.miniblock.txs_encoding_size + } + pub(crate) fn get_tx_by_index(&self, index: usize) -> &Transaction { if index < self.l1_batch.executed_transactions.len() { &self.l1_batch.executed_transactions[index].transaction @@ -126,7 +157,7 @@ mod tests { }, 0.into(), ); - let mut updates_manager = UpdatesManager::new(&block_context); + let mut updates_manager = UpdatesManager::new(&block_context, Default::default()); assert_eq!(updates_manager.pending_executed_transactions_len(), 0); // Apply tx. @@ -154,6 +185,7 @@ mod tests { gas_refunded: 0, operator_suggested_refund: 0, }, + vec![], new_block_gas_count(), Default::default(), ); diff --git a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs new file mode 100644 index 000000000000..316c19176e9a --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs @@ -0,0 +1,32 @@ +use super::ActionQueue; + +/// The task that keeps checking for the new batch status changes and persists them in the database. 
+pub fn run_batch_status_updater(actions: ActionQueue) { + loop { + let changes = actions.take_status_changes(); + for change in changes.commit { + vlog::info!( + "Commit status change: number {}, hash {}, happened at {}", + change.number, + change.l1_tx_hash, + change.happened_at + ); + } + for change in changes.prove { + vlog::info!( + "Prove status change: number {}, hash {}, happened at {}", + change.number, + change.l1_tx_hash, + change.happened_at + ); + } + for change in changes.execute { + vlog::info!( + "Execute status change: number {}, hash {}, happened at {}", + change.number, + change.l1_tx_hash, + change.happened_at + ); + } + } +} diff --git a/core/bin/zksync_core/src/sync_layer/external_io.rs b/core/bin/zksync_core/src/sync_layer/external_io.rs new file mode 100644 index 000000000000..ed480c8a37a0 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/external_io.rs @@ -0,0 +1,230 @@ +use std::time::Duration; + +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; + +use crate::state_keeper::{ + io::{ + common::{l1_batch_params, poll_until}, + L1BatchParams, PendingBatchData, StateKeeperIO, + }, + seal_criteria::SealerFn, + updates::UpdatesManager, +}; + +use super::sync_action::{ActionQueue, SyncAction}; + +/// The interval between the action queue polling attempts for the new actions. +const POLL_INTERVAL: Duration = Duration::from_millis(100); + +/// In the external node we don't actually decide whether we want to seal l1 batch or l2 block. +/// We must replicate the state as it's present in the main node. +/// This structure declares an "unconditional sealer" which would tell the state keeper to seal +/// blocks/batches at the same point as in the main node. 
+#[derive(Debug, Clone)] +pub struct ExternalNodeSealer { + actions: ActionQueue, +} + +impl ExternalNodeSealer { + pub fn new(actions: ActionQueue) -> Self { + Self { actions } + } + + fn should_seal_miniblock(&self) -> bool { + let res = matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock)); + vlog::info!("Asked if should seal the miniblock. The answer is {res}"); + res + } + + fn should_seal_batch(&self) -> bool { + let res = matches!(self.actions.peek_action(), Some(SyncAction::SealBatch)); + vlog::info!("Asked if should seal the batch. The answer is {res}"); + res + } + + pub fn into_unconditional_batch_seal_criterion(self) -> Box { + Box::new(move |_| self.should_seal_batch()) + } + + pub fn into_miniblock_seal_criterion(self) -> Box { + Box::new(move |_| self.should_seal_miniblock()) + } +} + +/// ExternalIO is the IO abstraction for the state keeper that is used in the external node. +/// It receives a sequence of actions from the fetcher via the action queue and propagates it +/// into the state keeper. +/// +/// It is also responsible for the persisting of data, and this slice of logic is pretty close +/// to the one in the mempool IO (which is used in the main node). 
+#[derive(Debug)] +pub struct ExternalIO { + fee_account: Address, + + current_l1_batch_number: L1BatchNumber, + current_miniblock_number: MiniblockNumber, + actions: ActionQueue, +} + +impl ExternalIO { + pub fn new(fee_account: Address, actions: ActionQueue) -> Self { + Self { + fee_account, + current_l1_batch_number: L1BatchNumber(1), + current_miniblock_number: MiniblockNumber(1), + actions, + } + } +} + +impl StateKeeperIO for ExternalIO { + fn current_l1_batch_number(&self) -> L1BatchNumber { + self.current_l1_batch_number + } + + fn current_miniblock_number(&self) -> MiniblockNumber { + self.current_miniblock_number + } + + fn load_pending_batch(&mut self) -> Option { + None + } + + fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option { + vlog::info!("Waiting for the new batch params"); + poll_until(POLL_INTERVAL, max_wait, || { + match self.actions.pop_action()? { + SyncAction::OpenBatch { + number, + timestamp, + l1_gas_price, + l2_fair_gas_price, + base_system_contracts_hashes, + } => { + assert_eq!( + number, self.current_l1_batch_number, + "Batch number mismatch" + ); + Some(l1_batch_params( + number, + self.fee_account, + timestamp, + Default::default(), + l1_gas_price, + l2_fair_gas_price, + load_base_contracts(base_system_contracts_hashes), + )) + } + other => { + panic!("Unexpected action in the action queue: {:?}", other); + } + } + }) + } + + fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option { + // Wait for the next miniblock to appear in the queue. + poll_until(POLL_INTERVAL, max_wait, || { + match self.actions.peek_action()? { + SyncAction::Miniblock { number, timestamp } => { + self.actions.pop_action(); // We found the miniblock, remove it from the queue. + assert_eq!( + number, self.current_miniblock_number, + "Miniblock number mismatch" + ); + Some(timestamp) + } + SyncAction::SealBatch => { + // We've reached the next batch, so this situation would be handled by the batch sealer. 
+ // No need to pop the action from the queue. + // It also doesn't matter which timestamp we return, since there will be no more miniblocks in this + // batch. We return 0 to make it easy to detect if it ever appears somewhere. + Some(0) + } + other => { + panic!( + "Unexpected action in the queue while waiting for the next miniblock {:?}", + other + ); + } + } + }) + } + + fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { + vlog::info!( + "Waiting for the new tx, next action is {:?}", + self.actions.peek_action() + ); + poll_until(POLL_INTERVAL, max_wait, || { + // We keep polling until we get any item from the queue. + // Once we have the item, it'll be either a transaction, or a seal request. + // Whatever item it is, we don't have to poll anymore and may exit, thus double option use. + match self.actions.peek_action()? { + SyncAction::Tx(_) => { + let SyncAction::Tx(tx) = self.actions.pop_action().unwrap() else { unreachable!() }; + Some(Some(*tx)) + } + _ => Some(None), + } + })? + } + + fn rollback(&mut self, tx: &Transaction) { + // We are replaying the already sealed batches so no rollbacks are expected to occur. + panic!("Rollback requested: {:?}", tx); + } + + fn reject(&mut self, tx: &Transaction, error: &str) { + // We are replaying the already executed transactions so no rejections are expected to occur. 
+ panic!( + "Reject requested because of the following error: {}.\n Transaction is: {:?}", + error, tx + ); + } + + fn seal_miniblock(&mut self, _updates_manager: &UpdatesManager) { + match self.actions.pop_action() { + Some(SyncAction::SealMiniblock) => {} + other => panic!( + "State keeper requested to seal miniblock, but the next action is {:?}", + other + ), + }; + self.current_miniblock_number += 1; + vlog::info!("Miniblock {} is sealed", self.current_miniblock_number); + } + + fn seal_l1_batch( + &mut self, + _block_result: vm::VmBlockResult, + _updates_manager: UpdatesManager, + _block_context: vm::vm_with_bootloader::DerivedBlockContext, + ) { + match self.actions.pop_action() { + Some(SyncAction::SealBatch) => {} + other => panic!( + "State keeper requested to seal the batch, but the next action is {:?}", + other + ), + }; + self.current_l1_batch_number += 1; + + vlog::info!("Batch {} is sealed", self.current_l1_batch_number); + } +} + +/// Currently it always returns the contracts that are present on the disk. +/// Later on, support for different base contracts versions will be added. 
+fn load_base_contracts(expected_hashes: BaseSystemContractsHashes) -> BaseSystemContracts { + let base_system_contracts = BaseSystemContracts::load_from_disk(); + let local_hashes = base_system_contracts.hashes(); + + assert_eq!( + local_hashes, expected_hashes, + "Local base system contract hashes do not match ones required to process the L1 batch" + ); + + base_system_contracts +} diff --git a/core/bin/zksync_core/src/sync_layer/fetcher.rs b/core/bin/zksync_core/src/sync_layer/fetcher.rs new file mode 100644 index 000000000000..c3908c378420 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/fetcher.rs @@ -0,0 +1,308 @@ +use std::time::Duration; + +use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber}; +use zksync_web3_decl::{ + jsonrpsee::{ + core::{Error as RpcError, RpcResult}, + http_client::{HttpClient, HttpClientBuilder}, + }, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, +}; + +use crate::sync_layer::sync_action::{BatchStatusChange, SyncAction}; + +use super::sync_action::ActionQueue; + +const DELAY_INTERVAL: Duration = Duration::from_millis(500); +const RECONNECT_INTERVAL: Duration = Duration::from_secs(5); + +/// Structure responsible for fetching batches and miniblock data from the main node. 
+#[derive(Debug)] +pub struct MainNodeFetcher { + main_node_url: String, + client: HttpClient, + current_l1_batch: L1BatchNumber, + current_miniblock: MiniblockNumber, + + last_executed_l1_batch: L1BatchNumber, + last_proven_l1_batch: L1BatchNumber, + last_committed_l1_batch: L1BatchNumber, + + actions: ActionQueue, +} + +impl MainNodeFetcher { + pub fn new( + main_node_url: &str, + current_l1_batch: L1BatchNumber, + current_miniblock: MiniblockNumber, + last_executed_l1_batch: L1BatchNumber, + last_proven_l1_batch: L1BatchNumber, + last_committed_l1_batch: L1BatchNumber, + actions: ActionQueue, + ) -> Self { + let client = Self::build_client(main_node_url); + + Self { + main_node_url: main_node_url.into(), + client, + current_l1_batch, + current_miniblock, + + last_executed_l1_batch, + last_proven_l1_batch, + last_committed_l1_batch, + + actions, + } + } + + fn build_client(main_node_url: &str) -> HttpClient { + HttpClientBuilder::default() + .build(main_node_url) + .expect("Unable to create a main node client") + } + + pub async fn run(mut self) { + vlog::info!( + "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", + self.current_miniblock, + self.current_l1_batch + ); + // Run the main routine and reconnect upon the network errors. + loop { + match self.run_inner().await { + Ok(()) => unreachable!("Fetcher actor never exits"), + Err(RpcError::Transport(err)) => { + vlog::warn!("Following transport error occurred: {}", err); + vlog::info!("Trying to reconnect"); + self.reconnect().await; + } + Err(err) => { + panic!("Unexpected error in the fetcher: {}", err); + } + } + } + } + + async fn reconnect(&mut self) { + loop { + self.client = Self::build_client(&self.main_node_url); + if self.client.chain_id().await.is_ok() { + vlog::info!("Reconnected"); + break; + } + vlog::warn!( + "Reconnect attempt unsuccessful. 
Next attempt would happen after a timeout" + ); + std::thread::sleep(RECONNECT_INTERVAL); + } + } + + async fn run_inner(&mut self) -> RpcResult<()> { + loop { + let mut progressed = false; + + if self.actions.has_action_capacity() { + progressed |= self.fetch_next_miniblock().await?; + } + if self.actions.has_status_change_capacity() { + progressed |= self.update_batch_statuses().await?; + } + + if !progressed { + // We didn't fetch any updated on this iteration, so to prevent a busy loop we wait a bit. + vlog::debug!("No updates to discover, waiting"); + std::thread::sleep(DELAY_INTERVAL); + } + } + } + + /// Tries to fetch the next miniblock and insert it to the sync queue. + /// Returns `true` if a miniblock was processed and `false` otherwise. + async fn fetch_next_miniblock(&mut self) -> RpcResult { + let Some(miniblock_header) = self + .client + .get_block_details(self.current_miniblock) + .await? + else { + return Ok(false); + }; + + let mut new_actions = Vec::new(); + if miniblock_header.l1_batch_number != self.current_l1_batch { + assert_eq!( + miniblock_header.l1_batch_number, + self.current_l1_batch.next(), + "Unexpected batch number in the next received miniblock" + ); + + vlog::info!( + "New batch: {}. Timestamp: {}", + miniblock_header.l1_batch_number, + miniblock_header.timestamp + ); + + new_actions.push(SyncAction::OpenBatch { + number: miniblock_header.l1_batch_number, + timestamp: miniblock_header.timestamp, + l1_gas_price: miniblock_header.l1_gas_price, + l2_fair_gas_price: miniblock_header.l2_fair_gas_price, + base_system_contracts_hashes: miniblock_header.base_system_contracts_hashes, + }); + + self.current_l1_batch += 1; + } else { + // New batch implicitly means a new miniblock, so we only need to push the miniblock action + // if it's not a new batch. 
+ new_actions.push(SyncAction::Miniblock { + number: miniblock_header.number, + timestamp: miniblock_header.timestamp, + }); + } + + let miniblock_txs = self + .client + .get_raw_block_transactions(self.current_miniblock) + .await? + .into_iter() + .map(|tx| SyncAction::Tx(Box::new(tx))); + new_actions.extend(miniblock_txs); + new_actions.push(SyncAction::SealMiniblock); + + // Check if this was the last miniblock in the batch. + // If we will receive `None` here, it would mean that it's the currently open batch and it was not sealed + // after the current miniblock. + let is_last_miniblock_of_batch = self + .client + .get_miniblock_range(self.current_l1_batch) + .await? + .map(|(_, last)| last.as_u32() == miniblock_header.number.0) + .unwrap_or(false); + if is_last_miniblock_of_batch { + new_actions.push(SyncAction::SealBatch); + } + + vlog::info!("New miniblock: {}", miniblock_header.number); + self.current_miniblock += 1; + self.actions.push_actions(new_actions); + Ok(true) + } + + /// Goes through the already fetched batches trying to update their statuses. + /// Returns `true` if at least one batch was updated, and `false` otherwise. + async fn update_batch_statuses(&mut self) -> RpcResult { + assert!( + self.last_executed_l1_batch <= self.last_proven_l1_batch, + "Incorrect local state: executed batch must be proven" + ); + assert!( + self.last_proven_l1_batch <= self.last_committed_l1_batch, + "Incorrect local state: proven batch must be committed" + ); + assert!( + self.last_committed_l1_batch <= self.current_l1_batch, + "Incorrect local state: unkonwn batch marked as committed" + ); + + let mut applied_updates = false; + for batch in + (self.last_executed_l1_batch.next().0..=self.current_l1_batch.0).map(L1BatchNumber) + { + // While we may receive `None` for the `self.current_l1_batch`, it's OK: open batch is guaranteed to not + // be sent to L1. + let Some((start_miniblock, _)) = self.client.get_miniblock_range(batch).await? 
else { + return Ok(applied_updates); + }; + // We could've used any miniblock from the range, all of them share the same info. + let Some(batch_info) = self + .client + .get_block_details(MiniblockNumber(start_miniblock.as_u32())) + .await? + else { + // We cannot recover from an external API inconsistency. + panic!( + "Node API is inconsistent: miniblock {} was reported to be a part of {} L1batch, \ + but API has no information about this miniblock", start_miniblock, batch + ); + }; + + applied_updates |= self.update_committed_batch(&batch_info); + applied_updates |= self.update_proven_batch(&batch_info); + applied_updates |= self.update_executed_batch(&batch_info); + + if batch_info.commit_tx_hash.is_none() { + // No committed batches after this one. + break; + } + } + + Ok(applied_updates) + } + + /// Returns `true` if batch info was updated. + fn update_committed_batch(&mut self, batch_info: &BlockDetails) -> bool { + if batch_info.commit_tx_hash.is_some() + && batch_info.l1_batch_number == self.last_committed_l1_batch.next() + { + assert!( + batch_info.committed_at.is_some(), + "Malformed API response: batch is committed, but has no commit timestamp" + ); + self.actions.push_commit_status_change(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.commit_tx_hash.unwrap(), + happened_at: batch_info.committed_at.unwrap(), + }); + vlog::info!("Batch {}: committed", batch_info.l1_batch_number); + self.last_committed_l1_batch += 1; + true + } else { + false + } + } + + /// Returns `true` if batch info was updated. 
+ fn update_proven_batch(&mut self, batch_info: &BlockDetails) -> bool { + if batch_info.prove_tx_hash.is_some() + && batch_info.l1_batch_number == self.last_proven_l1_batch.next() + { + assert!( + batch_info.proven_at.is_some(), + "Malformed API response: batch is proven, but has no prove timestamp" + ); + self.actions.push_prove_status_change(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.prove_tx_hash.unwrap(), + happened_at: batch_info.proven_at.unwrap(), + }); + vlog::info!("Batch {}: proven", batch_info.l1_batch_number); + self.last_proven_l1_batch += 1; + true + } else { + false + } + } + + /// Returns `true` if batch info was updated. + fn update_executed_batch(&mut self, batch_info: &BlockDetails) -> bool { + if batch_info.execute_tx_hash.is_some() + && batch_info.l1_batch_number == self.last_executed_l1_batch.next() + { + assert!( + batch_info.executed_at.is_some(), + "Malformed API response: batch is executed, but has no execute timestamp" + ); + self.actions.push_execute_status_change(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.execute_tx_hash.unwrap(), + happened_at: batch_info.executed_at.unwrap(), + }); + vlog::info!("Batch {}: executed", batch_info.l1_batch_number); + self.last_executed_l1_batch += 1; + true + } else { + false + } + } +} diff --git a/core/bin/zksync_core/src/sync_layer/genesis.rs b/core/bin/zksync_core/src/sync_layer/genesis.rs new file mode 100644 index 000000000000..06e3101622d1 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/genesis.rs @@ -0,0 +1,41 @@ +use crate::genesis::ensure_genesis_state; + +use zksync_config::ZkSyncConfig; +use zksync_dal::StorageProcessor; +use zksync_types::{L1BatchNumber, H256}; +use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient}; + +pub async fn perform_genesis_if_needed(storage: &mut StorageProcessor<'_>, config: &ZkSyncConfig) { + let mut transaction = 
storage.start_transaction().await; + let main_node_url = config + .api + .web3_json_rpc + .main_node_url + .as_ref() + .expect("main node url is not set"); + + let genesis_block_hash = ensure_genesis_state(&mut transaction, config).await; + + validate_genesis_state(main_node_url, genesis_block_hash).await; + transaction.commit().await; +} + +// When running an external node, we want to make sure we have the same +// genesis root hash as the main node. +async fn validate_genesis_state(main_node_url: &str, root_hash: H256) { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + let genesis_block = client + .get_l1_batch_details(L1BatchNumber(0)) + .await + .expect("couldn't get genesis block from the main node") + .expect("main node did not return a genesis block"); + + let genesis_block_hash = genesis_block.root_hash.expect("empty genesis block hash"); + + if genesis_block_hash != root_hash { + panic!( + "Genesis block root hash mismatch with main node: expected {}, got {}", + root_hash, genesis_block_hash + ); + } +} diff --git a/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs b/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs new file mode 100644 index 000000000000..080f3126d8aa --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs @@ -0,0 +1,103 @@ +//! This module provide a mock batch executor that in fact does not execute any transactions. +//! This is a stub that is helpful for the development of the External Node, as it allows to +//! not focus on the execution of the transactions, but rather only care about the data flow between +//! the fetcher and the state keeper. +//! +//! This is temporary module which will be removed once EN binary is more or less ready. +//! It also has a fair amount of copy-paste from the state keeper tests, which is OK, given that this module +//! is temporary and otherwise we would've had to make the state keeper tests public. 
+ +use std::sync::mpsc; + +use vm::{ + vm::{VmPartialExecutionResult, VmTxExecutionResult}, + VmBlockResult, VmExecutionResult, +}; +use zksync_types::tx::tx_execution_info::TxExecutionStatus; + +use crate::state_keeper::{ + batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, + io::L1BatchParams, + types::ExecutionMetricsForCriteria, +}; + +#[derive(Debug)] +pub struct MockBatchExecutorBuilder; + +impl L1BatchExecutorBuilder for MockBatchExecutorBuilder { + fn init_batch(&self, _l1_batch_params: L1BatchParams) -> BatchExecutorHandle { + let (tx, rx) = mpsc::channel::(); + let responder_thread_handle = std::thread::spawn(move || loop { + let action = rx.recv().unwrap(); + match action { + Command::ExecuteTx(_, resp) => { + resp.send(successful_exec()).unwrap(); + } + Command::RollbackLastTx(_resp) => { + panic!("Rollback should never happen"); + } + Command::FinishBatch(resp) => { + // Blanket result, it doesn't really matter. + let result = VmBlockResult { + full_result: VmExecutionResult { + events: Default::default(), + storage_log_queries: Default::default(), + used_contract_hashes: Default::default(), + l2_to_l1_logs: Default::default(), + return_data: Default::default(), + gas_used: Default::default(), + contracts_used: Default::default(), + revert_reason: Default::default(), + trace: Default::default(), + total_log_queries: Default::default(), + cycles_used: Default::default(), + }, + block_tip_result: VmPartialExecutionResult { + logs: Default::default(), + revert_reason: Default::default(), + contracts_used: Default::default(), + cycles_used: Default::default(), + }, + }; + + resp.send(result).unwrap(); + break; + } + } + }); + + BatchExecutorHandle::from_raw(responder_thread_handle, tx) + } +} + +fn partial_execution_result() -> VmPartialExecutionResult { + VmPartialExecutionResult { + logs: Default::default(), + revert_reason: Default::default(), + contracts_used: Default::default(), + cycles_used: 
Default::default(), + } +} + +/// Creates a `TxExecutionResult` object denoting a successful tx execution. +pub(crate) fn successful_exec() -> TxExecutionResult { + let mut result = TxExecutionResult::new(Ok(( + VmTxExecutionResult { + status: TxExecutionStatus::Success, + result: partial_execution_result(), + gas_refunded: 0, + operator_suggested_refund: 0, + }, + vec![], + ))); + result.add_tx_metrics(ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), + }); + result.add_bootloader_result(Ok(partial_execution_result())); + result.add_bootloader_metrics(ExecutionMetricsForCriteria { + l1_gas: Default::default(), + execution_metrics: Default::default(), + }); + result +} diff --git a/core/bin/zksync_core/src/sync_layer/mod.rs b/core/bin/zksync_core/src/sync_layer/mod.rs new file mode 100644 index 000000000000..6ae8b2eb8d39 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/mod.rs @@ -0,0 +1,11 @@ +pub mod batch_status_updater; +pub mod external_io; +pub mod fetcher; +pub mod genesis; +pub mod mock_batch_executor; +pub(crate) mod sync_action; + +pub use self::{ + external_io::{ExternalIO, ExternalNodeSealer}, + sync_action::ActionQueue, +}; diff --git a/core/bin/zksync_core/src/sync_layer/sync_action.rs b/core/bin/zksync_core/src/sync_layer/sync_action.rs new file mode 100644 index 000000000000..bf60a7831878 --- /dev/null +++ b/core/bin/zksync_core/src/sync_layer/sync_action.rs @@ -0,0 +1,328 @@ +use std::{ + collections::VecDeque, + sync::{Arc, RwLock}, +}; + +use chrono::{DateTime, Utc}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction, H256}; + +/// Action queue is used to communicate between the fetcher and the rest of the external node +/// by collecting the fetched data in memory until it gets processed by the different entities. 
+#[derive(Debug, Clone, Default)] +pub struct ActionQueue { + inner: Arc>, +} + +impl ActionQueue { + pub fn new() -> Self { + Self::default() + } + + /// Removes the first action from the queue. + pub(crate) fn pop_action(&self) -> Option { + let mut write_lock = self.inner.write().unwrap(); + write_lock.actions.pop_front() + } + + /// Returns the first action from the queue without removing it. + pub(crate) fn peek_action(&self) -> Option { + let read_lock = self.inner.read().unwrap(); + read_lock.actions.front().cloned() + } + + /// Returns true if the queue has capacity for a new action. + /// Capacity is limited to avoid memory exhaustion. + pub(crate) fn has_action_capacity(&self) -> bool { + const ACTION_CAPACITY: usize = 32_768; + + // Since the capacity is read before the action is pushed, + // it is possible that the capacity will be exceeded, since the fetcher will + // decompose received data into a sequence of actions. + // This is not a problem, since the size of decomposed action is much smaller + // than the configured capacity. + let read_lock = self.inner.read().unwrap(); + read_lock.actions.len() < ACTION_CAPACITY + } + + /// Returns true if the queue has capacity for a new status change. + /// Capacity is limited to avoid memory exhaustion. + pub(crate) fn has_status_change_capacity(&self) -> bool { + const STATUS_CHANGE_CAPACITY: usize = 8192; + + // We don't really care about any particular queue size, as the only intention + // of this check is to prevent memory exhaustion. + let read_lock = self.inner.read().unwrap(); + read_lock.commit_status_changes.len() < STATUS_CHANGE_CAPACITY + && read_lock.prove_status_changes.len() < STATUS_CHANGE_CAPACITY + && read_lock.execute_status_changes.len() < STATUS_CHANGE_CAPACITY + } + + /// Pushes a set of actions to the queue. 
+ /// + /// Requires that the actions are in the correct order: starts with a new open batch/miniblock, + /// followed by 0 or more transactions, have mandatory `SealMiniblock` and optional `SealBatch` at the end. + /// Would panic if the order is incorrect. + pub(crate) fn push_actions(&self, actions: Vec) { + // We need to enforce the ordering of actions to make sure that they can be processed. + Self::check_action_sequence(&actions).expect("Invalid sequence of actions."); + + let mut write_lock = self.inner.write().unwrap(); + write_lock.actions.extend(actions); + } + + /// Pushes a notification about certain batch being committed. + pub(crate) fn push_commit_status_change(&self, change: BatchStatusChange) { + let mut write_lock = self.inner.write().unwrap(); + write_lock.commit_status_changes.push_back(change); + } + + /// Pushes a notification about certain batch being proven. + pub(crate) fn push_prove_status_change(&self, change: BatchStatusChange) { + let mut write_lock = self.inner.write().unwrap(); + write_lock.prove_status_changes.push_back(change); + } + + /// Pushes a notification about certain batch being executed. + pub(crate) fn push_execute_status_change(&self, change: BatchStatusChange) { + let mut write_lock = self.inner.write().unwrap(); + write_lock.execute_status_changes.push_back(change); + } + + /// Collects all status changes and returns them. + pub(crate) fn take_status_changes(&self) -> StatusChanges { + let mut write_lock = self.inner.write().unwrap(); + StatusChanges { + commit: write_lock.commit_status_changes.drain(..).collect(), + prove: write_lock.prove_status_changes.drain(..).collect(), + execute: write_lock.execute_status_changes.drain(..).collect(), + } + } + + /// Checks whether the action sequence is valid. + /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable + /// error. This function itself does not panic for the ease of testing. 
+ fn check_action_sequence(actions: &[SyncAction]) -> Result<(), String> { + // Rules for the sequence: + // 1. Must start with either `OpenBatch` or `Miniblock`, both of which may be met only once. + // 2. Followed by a sequence of `Tx` actions which consists of 0 or more elements. + // 3. Must have `SealMiniblock` come after transactions. + // 4. May or may not have `SealBatch` come after `SealMiniblock`. + + let mut opened = false; + let mut miniblock_sealed = false; + let mut batch_sealed = false; + + for action in actions { + match action { + SyncAction::OpenBatch { .. } | SyncAction::Miniblock { .. } => { + if opened { + return Err(format!("Unexpected OpenBatch/Miniblock: {:?}", actions)); + } + opened = true; + } + SyncAction::Tx(_) => { + if !opened || miniblock_sealed { + return Err(format!("Unexpected Tx: {:?}", actions)); + } + } + SyncAction::SealMiniblock => { + if !opened || miniblock_sealed { + return Err(format!("Unexpected SealMiniblock: {:?}", actions)); + } + miniblock_sealed = true; + } + SyncAction::SealBatch => { + if !miniblock_sealed || batch_sealed { + return Err(format!("Unexpected SealBatch: {:?}", actions)); + } + batch_sealed = true; + } + } + } + if !miniblock_sealed { + return Err(format!("Incomplete sequence: {:?}", actions)); + } + Ok(()) + } +} + +#[derive(Debug)] +pub(crate) struct StatusChanges { + pub(crate) commit: Vec, + pub(crate) prove: Vec, + pub(crate) execute: Vec, +} + +#[derive(Debug, Default)] +struct ActionQueueInner { + actions: VecDeque, + commit_status_changes: VecDeque, + prove_status_changes: VecDeque, + execute_status_changes: VecDeque, +} + +/// An instruction for the ExternalIO to request a certain action from the state keeper. 
+#[derive(Debug, Clone)] +pub(crate) enum SyncAction { + OpenBatch { + number: L1BatchNumber, + timestamp: u64, + l1_gas_price: u64, + l2_fair_gas_price: u64, + base_system_contracts_hashes: BaseSystemContractsHashes, + }, + Miniblock { + number: MiniblockNumber, + timestamp: u64, + }, + Tx(Box), + /// We need an explicit action for the miniblock sealing, since we fetch the whole miniblocks and already know + /// that they are sealed, but at the same time the next miniblock may not exist yet. + /// By having a dedicated action for that we prevent a situation where the miniblock is kept open on the EN until + /// the next one is sealed on the main node. + SealMiniblock, + /// Similarly to `SealMiniblock` we must be able to seal the batch even if there is no next miniblock yet. + SealBatch, +} + +/// Represents a change in the batch status. +/// It may be a batch being committed, proven or executed. +#[derive(Debug)] +pub(crate) struct BatchStatusChange { + pub(crate) number: L1BatchNumber, + pub(crate) l1_tx_hash: H256, + pub(crate) happened_at: DateTime, +} + +#[cfg(test)] +mod tests { + use zksync_types::l2::L2Tx; + + use super::*; + + fn open_batch() -> SyncAction { + SyncAction::OpenBatch { + number: 1.into(), + timestamp: 1, + l1_gas_price: 1, + l2_fair_gas_price: 1, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + } + } + + fn miniblock() -> SyncAction { + SyncAction::Miniblock { + number: 1.into(), + timestamp: 1, + } + } + + fn tx() -> SyncAction { + let mut tx = L2Tx::new( + Default::default(), + Default::default(), + 0.into(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); + tx.set_input(H256::default().0.to_vec(), H256::default()); + + SyncAction::Tx(Box::new(tx.into())) + } + + fn seal_miniblock() -> SyncAction { + SyncAction::SealMiniblock + } + + fn seal_batch() -> SyncAction { + SyncAction::SealBatch + } + + #[test] + fn correct_sequence() { + let test_vector = 
vec![ + vec![open_batch(), seal_miniblock()], + vec![open_batch(), tx(), seal_miniblock()], + vec![open_batch(), seal_miniblock(), seal_batch()], + vec![open_batch(), tx(), seal_miniblock(), seal_batch()], + vec![miniblock(), seal_miniblock()], + vec![miniblock(), tx(), seal_miniblock()], + vec![miniblock(), seal_miniblock(), seal_batch()], + vec![miniblock(), tx(), seal_miniblock(), seal_batch()], + ]; + for (idx, sequence) in test_vector.into_iter().enumerate() { + ActionQueue::check_action_sequence(&sequence) + .unwrap_or_else(|_| panic!("Valid sequence #{} failed", idx)); + } + } + + #[test] + fn incorrect_sequence() { + // Note: it is very important to check the exact error that occurs to prevent the test to pass if sequence is + // considered invalid e.g. because it's incomplete. + let test_vector = vec![ + // Incomplete sequences. + (vec![open_batch()], "Incomplete sequence"), + (vec![open_batch(), tx()], "Incomplete sequence"), + (vec![miniblock()], "Incomplete sequence"), + (vec![miniblock(), tx()], "Incomplete sequence"), + // Unexpected tx + (vec![tx()], "Unexpected Tx"), + (vec![open_batch(), seal_miniblock(), tx()], "Unexpected Tx"), + // Unexpected OpenBatch/Miniblock + ( + vec![miniblock(), miniblock()], + "Unexpected OpenBatch/Miniblock", + ), + ( + vec![miniblock(), open_batch()], + "Unexpected OpenBatch/Miniblock", + ), + ( + vec![open_batch(), miniblock()], + "Unexpected OpenBatch/Miniblock", + ), + // Unexpected SealMiniblock + (vec![seal_miniblock()], "Unexpected SealMiniblock"), + ( + vec![miniblock(), seal_miniblock(), seal_miniblock()], + "Unexpected SealMiniblock", + ), + // Unexpected SealBatch. 
+ ( + vec![open_batch(), tx(), seal_batch()], + "Unexpected SealBatch", + ), + (vec![open_batch(), seal_batch()], "Unexpected SealBatch"), + ( + vec![open_batch(), seal_miniblock(), seal_batch(), seal_batch()], + "Unexpected SealBatch", + ), + (vec![miniblock(), seal_batch()], "Unexpected SealBatch"), + ( + vec![miniblock(), seal_miniblock(), seal_batch(), seal_batch()], + "Unexpected SealBatch", + ), + (vec![seal_batch()], "Unexpected SealBatch"), + ( + vec![miniblock(), tx(), seal_batch()], + "Unexpected SealBatch", + ), + ]; + for (idx, (sequence, expected_err)) in test_vector.into_iter().enumerate() { + let err = + ActionQueue::check_action_sequence(&sequence).expect_err("Invalid sequence passed"); + assert!( + err.starts_with(expected_err), + "Sequence #{} failed. Expected error: {}, got: {}", + idx, + expected_err, + err + ); + } + } +} diff --git a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs index bf8efc02685e..1ff506ff1539 100644 --- a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs +++ b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs @@ -6,10 +6,9 @@ use std::rc::Rc; use std::time::Instant; use vm::zk_evm::bitflags::_core::cell::RefCell; use vm::zk_evm::ethereum_types::H256; -use vm::{StorageOracle, MAX_CYCLES_FOR_TX}; +use vm::{memory::SimpleMemory, StorageOracle, MAX_CYCLES_FOR_TX}; use zksync_config::configs::WitnessGeneratorConfig; use zksync_config::constants::BOOTLOADER_ADDRESS; -use zksync_contracts::{read_proved_block_bootloader_bytecode, read_sys_contract_bytecode}; use zksync_dal::ConnectionPool; use zksync_object_store::gcs_utils::{ basic_circuits_blob_url, basic_circuits_inputs_blob_url, merkle_tree_paths_blob_url, @@ -35,7 +34,6 @@ use zksync_types::{ }, Address, L1BatchNumber, U256, }; -use zksync_utils::bytecode::hash_bytecode; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; use 
crate::db_storage_provider::DbStorageProvider; @@ -115,7 +113,7 @@ pub fn update_database( ); transaction.commit_blocking(); - track_witness_generation_stage(block_number, started_at, AggregationRound::BasicCircuits); + track_witness_generation_stage(started_at, AggregationRound::BasicCircuits); } pub async fn get_artifacts( @@ -202,9 +200,14 @@ pub fn build_basic_circuits_witness_generator_input( .blocks_dal() .get_block_header(block_number - 1) .unwrap(); + let previous_block_hash = connection + .blocks_dal() + .get_block_state_root(block_number - 1) + .expect("cannot generate witness before the root hash is computed"); BasicCircuitWitnessGeneratorInput { block_number, previous_block_timestamp: previous_block_header.timestamp, + previous_block_hash, block_timestamp: block_header.timestamp, used_bytecodes_hashes: block_header.used_contract_hashes, initial_heap_content: block_header.initial_bootloader_contents, @@ -222,33 +225,47 @@ pub fn generate_witness( SchedulerCircuitInstanceWitness, ) { let mut connection = connection_pool.access_storage_blocking(); - - let account_bytecode = read_sys_contract_bytecode("", "DefaultAccount"); - let account_code_hash = h256_to_u256(hash_bytecode(&account_bytecode)); - let bootloader_code_bytes = read_proved_block_bootloader_bytecode(); - let bootloader_code_hash = h256_to_u256(hash_bytecode(&bootloader_code_bytes)); + let header = connection + .blocks_dal() + .get_block_header(input.block_number) + .unwrap(); + let bootloader_code_bytes = connection + .storage_dal() + .get_factory_dep(header.base_system_contracts_hashes.bootloader) + .expect("Bootloader bytecode should exist"); let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); + let account_bytecode_bytes = connection + .storage_dal() + .get_factory_dep(header.base_system_contracts_hashes.default_aa) + .expect("Default aa bytecode should exist"); + let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); let bootloader_contents = 
expand_bootloader_contents(input.initial_heap_content); + let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa); let hashes: HashSet = input .used_bytecodes_hashes .iter() // SMA-1555: remove this hack once updated to the latest version of zkevm_test_harness - .filter(|&&hash| hash != bootloader_code_hash) + .filter(|&&hash| hash != h256_to_u256(header.base_system_contracts_hashes.bootloader)) .map(|hash| u256_to_h256(*hash)) .collect(); let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes); if input.used_bytecodes_hashes.contains(&account_code_hash) { - used_bytecodes.insert(account_code_hash, bytes_to_chunks(&account_bytecode)); + used_bytecodes.insert(account_code_hash, account_bytecode); + } + let factory_dep_bytecode_hashes: HashSet = used_bytecodes + .clone() + .keys() + .map(|&hash| u256_to_h256(hash)) + .collect(); + let missing_deps: HashSet<_> = hashes + .difference(&factory_dep_bytecode_hashes) + .cloned() + .collect(); + if !missing_deps.is_empty() { + vlog::error!("{:?} factory deps are not found in DB", missing_deps); } - - assert_eq!( - hashes.len(), - used_bytecodes.len(), - "{} factory deps are not found in DB", - hashes.len() - used_bytecodes.len() - ); // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. // Probably, we should make it work with L1 batch numbers too. 
@@ -257,10 +274,14 @@ pub fn generate_witness( .get_miniblock_range_of_l1_batch(input.block_number - 1) .expect("L1 batch should contain at least one miniblock"); let db_storage_provider = DbStorageProvider::new(connection, last_miniblock_number, true); - let mut tree = PrecalculatedMerklePathsProvider::new(input.merkle_paths_input); + let mut tree = PrecalculatedMerklePathsProvider::new( + input.merkle_paths_input, + input.previous_block_hash.0.to_vec(), + ); let storage_ptr: &mut dyn vm::storage::Storage = &mut StorageView::new(db_storage_provider); let storage_oracle = StorageOracle::new(Rc::new(RefCell::new(storage_ptr))); + let memory = SimpleMemory::default(); let mut hasher = DefaultHasher::new(); GEOMETRY_CONFIG.hash(&mut hasher); vlog::info!( @@ -301,6 +322,7 @@ pub fn generate_witness( MAX_CYCLES_FOR_TX as usize, GEOMETRY_CONFIG, storage_oracle, + memory, &mut tree, ) } @@ -351,17 +373,17 @@ fn save_run_with_fixed_params_args_to_gcs( #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct RunWithFixedParamsInput { - l1_batch_number: u32, - last_miniblock_number: u32, - caller: Address, - entry_point_address: Address, - entry_point_code: Vec<[u8; 32]>, - initial_heap_content: Vec, - zk_porter_is_available: bool, - default_aa_code_hash: U256, - used_bytecodes: HashMap>, - ram_verification_queries: Vec<(u32, U256)>, - cycle_limit: usize, - geometry: GeometryConfig, - tree: PrecalculatedMerklePathsProvider, + pub l1_batch_number: u32, + pub last_miniblock_number: u32, + pub caller: Address, + pub entry_point_address: Address, + pub entry_point_code: Vec<[u8; 32]>, + pub initial_heap_content: Vec, + pub zk_porter_is_available: bool, + pub default_aa_code_hash: U256, + pub used_bytecodes: HashMap>, + pub ram_verification_queries: Vec<(u32, U256)>, + pub cycle_limit: usize, + pub geometry: GeometryConfig, + pub tree: PrecalculatedMerklePathsProvider, } diff --git a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs 
b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs index 92a6aef15f20..589c6d784639 100644 --- a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs @@ -148,7 +148,7 @@ pub fn update_database( ); transaction.commit_blocking(); - track_witness_generation_stage(block_number, started_at, AggregationRound::LeafAggregation); + track_witness_generation_stage(started_at, AggregationRound::LeafAggregation); } pub async fn get_artifacts( diff --git a/core/bin/zksync_core/src/witness_generator/mod.rs b/core/bin/zksync_core/src/witness_generator/mod.rs index 25dbe18f4c1f..ffc28b0ef8d9 100644 --- a/core/bin/zksync_core/src/witness_generator/mod.rs +++ b/core/bin/zksync_core/src/witness_generator/mod.rs @@ -2,11 +2,9 @@ use std::fmt::Debug; use std::time::Instant; use async_trait::async_trait; +use rand::Rng; -use zksync_config::configs::prover::ProverConfigs; -use zksync_config::configs::witness_generator::SamplingMode; use zksync_config::configs::WitnessGeneratorConfig; -use zksync_config::ProverConfig; use zksync_dal::ConnectionPool; use zksync_object_store::object_store::create_object_store_from_env; use zksync_queued_job_processor::JobProcessor; @@ -98,33 +96,23 @@ impl WitnessGenerator { started_at: Instant, ) -> Option { let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); - let prover_config: ProverConfig = ProverConfigs::from_env().non_gpu; let WitnessGeneratorJob { block_number, job } = job; - if let ( - SamplingMode::Enabled(sampling_params), - &WitnessGeneratorJobInput::BasicCircuits(_), - ) = (config.sampling_mode(), &job) + if let (Some(blocks_proving_percentage), &WitnessGeneratorJobInput::BasicCircuits(_)) = + (config.blocks_proving_percentage, &job) { - let mut storage = connection_pool.access_storage_blocking(); - - let last_sealed_l1_batch_number = storage.blocks_dal().get_sealed_block_number(); - let min_unproved_l1_batch_number = storage - 
.prover_dal() - .min_unproved_l1_batch_number(prover_config.max_attempts) - .unwrap_or(last_sealed_l1_batch_number); - let prover_lag = last_sealed_l1_batch_number.0 - min_unproved_l1_batch_number.0; - - let sampling_probability = - sampling_params.calculate_sampling_probability(prover_lag as usize); - - // Generate random number in [0; 1). - let rand_value = rand::random::(); - // We get value higher than `sampling_probability` with prob = `1 - sampling_probability`. + // Generate random number in (0; 100). + let rand_value = rand::thread_rng().gen_range(1..100); + // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. // In this case job should be skipped. - if rand_value > sampling_probability { + if rand_value > blocks_proving_percentage { metrics::counter!("server.witness_generator.skipped_blocks", 1); - vlog::info!("Skipping witness generation for block {}, prover lag: {}, sampling probability: {}", block_number.0, prover_lag, sampling_probability); + vlog::info!( + "Skipping witness generation for block {}, blocks_proving_percentage: {}", + block_number.0, + blocks_proving_percentage + ); + let mut storage = connection_pool.access_storage_blocking(); storage .witness_generator_dal() .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits); @@ -192,12 +180,13 @@ impl JobProcessor for WitnessGenerator { ) -> Option<(Self::JobId, Self::Job)> { let mut connection = connection_pool.access_storage().await; let object_store = create_object_store_from_env(); - + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); let optional_metadata = connection .witness_generator_dal() .get_next_scheduler_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, + last_l1_batch_to_process, ); if let Some(metadata) = optional_metadata { @@ -224,6 +213,7 @@ impl JobProcessor for WitnessGenerator { .get_next_node_aggregation_witness_job( 
self.config.witness_generation_timeout(), self.config.max_attempts, + last_l1_batch_to_process, ); if let Some(metadata) = optional_metadata { @@ -236,6 +226,7 @@ impl JobProcessor for WitnessGenerator { .get_next_leaf_aggregation_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, + last_l1_batch_to_process, ); if let Some(metadata) = optional_metadata { @@ -248,6 +239,7 @@ impl JobProcessor for WitnessGenerator { .get_next_basic_circuit_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, + last_l1_batch_to_process, ); if let Some(metadata) = optional_metadata { @@ -387,11 +379,7 @@ fn get_circuit_types(serialized_circuits: &[(String, Vec)]) -> Vec { .collect() } -fn track_witness_generation_stage( - block_number: L1BatchNumber, - started_at: Instant, - round: AggregationRound, -) { +fn track_witness_generation_stage(started_at: Instant, round: AggregationRound) { let stage = match round { AggregationRound::BasicCircuits => "basic_circuits", AggregationRound::LeafAggregation => "leaf_aggregation", @@ -403,11 +391,6 @@ fn track_witness_generation_stage( started_at.elapsed(), "stage" => format!("wit_gen_{}", stage) ); - metrics::gauge!( - "server.block_number", - block_number.0 as f64, - "stage" => format!("wit_gen_{}", stage) - ); } fn serialize_circuits( diff --git a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs index e5095d967264..73f18f62fccd 100644 --- a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs @@ -184,7 +184,7 @@ pub fn update_database( ); transaction.commit_blocking(); - track_witness_generation_stage(block_number, started_at, AggregationRound::NodeAggregation); + track_witness_generation_stage(started_at, AggregationRound::NodeAggregation); } pub async fn get_artifacts( diff --git 
a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs index 6215abc302a6..4f585347b1bf 100644 --- a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs +++ b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs @@ -21,13 +21,7 @@ pub struct PrecalculatedMerklePathsProvider { } impl PrecalculatedMerklePathsProvider { - pub fn new(input: PrepareBasicCircuitsJob) -> Self { - let root_hash = input - .merkle_paths - .first() - .map(|e| e.root_hash.clone()) - .unwrap_or_else(|| vec![0; 32]); - + pub fn new(input: PrepareBasicCircuitsJob, root_hash: Vec) -> Self { vlog::debug!("Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, input.next_enumeration_index); Self { root_hash, diff --git a/core/bin/zksync_core/src/witness_generator/scheduler.rs b/core/bin/zksync_core/src/witness_generator/scheduler.rs index ff5bb6aca098..6ef3bc58e751 100644 --- a/core/bin/zksync_core/src/witness_generator/scheduler.rs +++ b/core/bin/zksync_core/src/witness_generator/scheduler.rs @@ -177,7 +177,7 @@ pub fn update_database( ); transaction.commit_blocking(); - track_witness_generation_stage(block_number, started_at, AggregationRound::Scheduler); + track_witness_generation_stage(started_at, AggregationRound::Scheduler); } pub async fn save_artifacts( diff --git a/core/bin/zksync_core/src/witness_generator/utils.rs b/core/bin/zksync_core/src/witness_generator/utils.rs index 1749c3a9740b..e5726bf5c3c6 100644 --- a/core/bin/zksync_core/src/witness_generator/utils.rs +++ b/core/bin/zksync_core/src/witness_generator/utils.rs @@ -1,11 +1,12 @@ -use vm::zk_evm::abstractions::MEMORY_CELLS_OTHER_PAGES; +use vm::zk_evm::abstractions::MAX_MEMORY_BYTES; use vm::zk_evm::ethereum_types::U256; use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; use 
zksync_object_store::object_store::{DynamicObjectStore, PROVER_JOBS_BUCKET_PATH}; use zksync_types::{proofs::AggregationRound, L1BatchNumber}; pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec { - let mut result: [u8; MEMORY_CELLS_OTHER_PAGES] = [0; MEMORY_CELLS_OTHER_PAGES]; + let mut result: Vec = Vec::new(); + result.resize(MAX_MEMORY_BYTES, 0); for (offset, value) in packed { value.to_big_endian(&mut result[(offset * 32)..(offset + 1) * 32]); diff --git a/core/lib/circuit_breaker/src/code_hashes.rs b/core/lib/circuit_breaker/src/code_hashes.rs deleted file mode 100644 index d497df4bb1c4..000000000000 --- a/core/lib/circuit_breaker/src/code_hashes.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::{CircuitBreaker, CircuitBreakerError}; -use thiserror::Error; -use zksync_config::ZkSyncConfig; -use zksync_contracts::{DEFAULT_ACCOUNT_CODE, PROVED_BLOCK_BOOTLOADER_CODE}; -use zksync_eth_client::clients::http_client::EthereumClient; -use zksync_types::H256; -use zksync_utils::u256_to_h256; - -#[derive(Debug, Error)] -pub enum MismatchedCodeHashError { - #[error("Server has different bootloader code hash from the one on L1 contract, server: {server_hash:?}, contract: {contract_hash:?}")] - Bootloader { - server_hash: H256, - contract_hash: H256, - }, - #[error("Server has different default account code hash from the one on L1 contract, server: {server_hash:?}, contract: {contract_hash:?}")] - DefaultAccount { - server_hash: H256, - contract_hash: H256, - }, -} - -#[derive(Debug)] -pub struct CodeHashesChecker { - pub eth_client: EthereumClient, -} - -impl CodeHashesChecker { - pub fn new(config: &ZkSyncConfig) -> Self { - Self { - eth_client: EthereumClient::from_config(config), - } - } -} - -#[async_trait::async_trait] -impl CircuitBreaker for CodeHashesChecker { - async fn check(&self) -> Result<(), CircuitBreakerError> { - let bootloader_code_hash_on_l1: H256 = self - .eth_client - .call_main_contract_function( - "getL2BootloaderBytecodeHash", - (), - 
None, - Default::default(), - None, - ) - .await - .unwrap(); - if bootloader_code_hash_on_l1 != u256_to_h256(PROVED_BLOCK_BOOTLOADER_CODE.hash) { - return Err(CircuitBreakerError::MismatchedCodeHash( - MismatchedCodeHashError::Bootloader { - server_hash: u256_to_h256(PROVED_BLOCK_BOOTLOADER_CODE.hash), - contract_hash: bootloader_code_hash_on_l1, - }, - )); - } - - let default_account_code_hash_on_l1: H256 = self - .eth_client - .call_main_contract_function( - "getL2DefaultAccountBytecodeHash", - (), - None, - Default::default(), - None, - ) - .await - .unwrap(); - if default_account_code_hash_on_l1 != u256_to_h256(DEFAULT_ACCOUNT_CODE.hash) { - return Err(CircuitBreakerError::MismatchedCodeHash( - MismatchedCodeHashError::DefaultAccount { - server_hash: u256_to_h256(DEFAULT_ACCOUNT_CODE.hash), - contract_hash: default_account_code_hash_on_l1, - }, - )); - } - Ok(()) - } -} diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index 034581ab2822..611f922cf56e 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -6,11 +6,9 @@ use tokio::sync::watch; use zksync_config::configs::chain::CircuitBreakerConfig; -use crate::code_hashes::MismatchedCodeHashError; use crate::facet_selectors::MismatchedFacetSelectorsError; use crate::vks::VerifierError; -pub mod code_hashes; pub mod facet_selectors; pub mod l1_txs; pub mod utils; @@ -20,8 +18,6 @@ pub mod vks; pub enum CircuitBreakerError { #[error("System has failed L1 transaction")] FailedL1Transaction, - #[error("Mismatched code hash: {0}")] - MismatchedCodeHash(MismatchedCodeHashError), #[error("Verifier error: {0}")] Verifier(VerifierError), #[error("Mismatched facet selectors: {0}")] diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 62c20909232f..ac606d955d9b 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -64,6 +64,10 @@ pub struct Web3JsonRpc { pub 
estimate_gas_scale_factor: f64, /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. pub estimate_gas_acceptable_overestimation: u32, + /// Max possible size of an ABI encoded tx (in bytes). + pub max_tx_size: usize, + /// Main node URL - used only by external node to proxy transactions to. + pub main_node_url: Option, } impl Web3JsonRpc { @@ -169,6 +173,8 @@ mod tests { estimate_gas_scale_factor: 1.0f64, gas_price_scale_factor: 1.2, estimate_gas_acceptable_overestimation: 1000, + max_tx_size: 1000000, + main_node_url: None, }, explorer: Explorer { port: 3070, @@ -205,6 +211,7 @@ API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 API_WEB3_JSON_RPC_ACCOUNT_PKS=0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002 API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 +API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 API_EXPLORER_PORT="3070" API_EXPLORER_URL="http://127.0.0.1:3070" API_EXPLORER_NETWORK_STATS_POLLING_INTERVAL="1000" diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 3af005531e76..a6c7d7268915 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use std::time::Duration; // Local uses use zksync_basic_types::network::Network; -use zksync_basic_types::Address; +use zksync_basic_types::{Address, H256}; use crate::envy_load; @@ -48,12 +48,15 @@ pub struct Eth { #[derive(Debug, Deserialize, Clone, PartialEq, Default)] pub struct StateKeeperConfig { - /// Detones the amount of slots for transactions in the block. + /// The max number of slots for txs in a block before it should be sealed by the slots sealer. pub transaction_slots: usize, + /// Number of ms after which an L1 batch is going to be unconditionally sealed. 
pub block_commit_deadline_ms: u64, + /// Number of ms after which a miniblock should be sealed by the timeout sealer. pub miniblock_commit_deadline_ms: u64, + /// The max number of gas to spend on an L1 tx before its batch should be sealed by the gas sealer. pub max_single_tx_gas: u32, pub max_allowed_l2_tx_gas_limit: u32, @@ -77,6 +80,15 @@ pub struct StateKeeperConfig { pub fee_account_addr: Address, pub reexecute_each_tx: bool, + + /// The price the operator spends on 1 gas of computation in wei. + pub fair_l2_gas_price: u64, + + pub bootloader_hash: H256, + pub default_aa_hash: H256, + + /// Max number of computational gas that validation step is allowed to take. + pub validation_computational_gas_limit: u32, } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -147,6 +159,10 @@ mod tests { fee_account_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"), reject_tx_at_gas_percentage: 0.5, reexecute_each_tx: true, + fair_l2_gas_price: 250000000, + bootloader_hash: H256::from(&[254; 32]), + default_aa_hash: H256::from(&[254; 32]), + validation_computational_gas_limit: 10_000_000, }, operations_manager: OperationsManager { delay_interval: 100, @@ -183,6 +199,10 @@ CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE="0.5" CHAIN_STATE_KEEPER_REEXECUTE_EACH_TX="true" CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS="2500" CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS="1000" +CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE="250000000" +CHAIN_STATE_KEEPER_BOOTLOADER_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" +CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" +CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" diff --git a/core/lib/config/src/configs/circuit_synthesizer.rs b/core/lib/config/src/configs/circuit_synthesizer.rs index b64c950af54b..7f1a8e7012e3 100644 --- 
a/core/lib/config/src/configs/circuit_synthesizer.rs +++ b/core/lib/config/src/configs/circuit_synthesizer.rs @@ -21,6 +21,8 @@ pub struct CircuitSynthesizerConfig { pub prometheus_listener_port: u16, pub prometheus_pushgateway_url: String, pub prometheus_push_interval_ms: Option, + // Group id for this synthesizer, synthesizer running the same circuit types shall have same group id. + pub prover_group_id: u8, } impl CircuitSynthesizerConfig { @@ -61,6 +63,7 @@ mod tests { prometheus_listener_port: 3314, prometheus_pushgateway_url: "http://127.0.0.1:9091".to_string(), prometheus_push_interval_ms: Some(100), + prover_group_id: 0, } } @@ -75,6 +78,7 @@ mod tests { CIRCUIT_SYNTHESIZER_PROMETHEUS_LISTENER_PORT=3314 CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" CIRCUIT_SYNTHESIZER_PROMETHEUS_PUSH_INTERVAL_MS=100 + CIRCUIT_SYNTHESIZER_PROVER_GROUP_ID=0 "#; set_env(config); let actual = CircuitSynthesizerConfig::from_env(); diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index 81481e968f01..09d2ac3c814d 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -17,6 +17,7 @@ pub struct ContractsConfig { pub diamond_init_addr: Address, pub diamond_upgrade_init_addr: Address, pub diamond_proxy_addr: Address, + pub validator_timelock_addr: Address, pub genesis_tx_hash: H256, pub l1_erc20_bridge_proxy_addr: Address, pub l1_erc20_bridge_impl_addr: Address, @@ -47,6 +48,7 @@ mod tests { diamond_init_addr: addr("FFC35A5e767BE36057c34586303498e3de7C62Ba"), diamond_upgrade_init_addr: addr("FFC35A5e767BE36057c34586303498e3de7C62Ba"), diamond_proxy_addr: addr("F00B988a98Ca742e7958DeF9F7823b5908715f4a"), + validator_timelock_addr: addr("F00B988a98Ca742e7958DeF9F7823b5908715f4a"), genesis_tx_hash: hash( "b99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e", ), @@ -70,6 +72,7 @@ CONTRACTS_VERIFIER_ADDR="0x34782eE00206EAB6478F2692caa800e4A581687b" 
CONTRACTS_DIAMOND_INIT_ADDR="0xFFC35A5e767BE36057c34586303498e3de7C62Ba" CONTRACTS_DIAMOND_UPGRADE_INIT_ADDR="0xFFC35A5e767BE36057c34586303498e3de7C62Ba" CONTRACTS_DIAMOND_PROXY_ADDR="0xF00B988a98Ca742e7958DeF9F7823b5908715f4a" +CONTRACTS_VALIDATOR_TIMELOCK_ADDR="0xF00B988a98Ca742e7958DeF9F7823b5908715f4a" CONTRACTS_GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_ALLOW_LIST_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index afbc65511553..41ab695e30bf 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -43,6 +43,8 @@ pub struct SenderConfig { pub wait_confirmations: u64, /// Node polling period in seconds. pub tx_poll_period: u64, + /// Aggregate txs polling period in seconds. + pub aggregate_tx_poll_period: u64, /// The maximum number of unconfirmed Ethereum transactions. pub max_txs_in_flight: u64, /// The mode in which proofs are sent. @@ -56,6 +58,11 @@ pub struct SenderConfig { pub aggregated_block_prove_deadline: u64, pub aggregated_block_execute_deadline: u64, pub timestamp_criteria_max_allowed_lag: usize, + + /// L1 batches will only be executed on L1 contract after they are at least this number of seconds old. + /// Note that this number must be slightly higher than the one set on the contract, + /// because the contract uses block.timestamp which lags behind the clock time. + pub l1_batch_min_age_before_execute_seconds: Option, } impl SenderConfig { @@ -63,6 +70,10 @@ impl SenderConfig { pub fn tx_poll_period(&self) -> Duration { Duration::from_secs(self.tx_poll_period) } + /// Converts `self.aggregate_tx_poll_period` into `Duration`. 
+ pub fn aggregate_tx_poll_period(&self) -> Duration { + Duration::from_secs(self.aggregate_tx_poll_period) + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq)] @@ -77,6 +88,8 @@ pub struct GasAdjusterConfig { pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, + /// If equal to Some(x), then it will always provide `x` as the L1 gas price + pub internal_enforced_l1_gas_price: Option, /// Node polling period in seconds pub poll_period: u64, } @@ -108,12 +121,14 @@ mod tests { max_aggregated_blocks_to_execute: 4, wait_confirmations: 1, tx_poll_period: 3, + aggregate_tx_poll_period: 3, max_txs_in_flight: 3, operator_private_key: hash( "27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be", ), operator_commit_eth_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"), proof_sending_mode: ProofSendingMode::SkipEveryProof, + l1_batch_min_age_before_execute_seconds: Some(1000), }, gas_adjuster: GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -121,6 +136,7 @@ mod tests { pricing_formula_parameter_a: 1.5, pricing_formula_parameter_b: 1.0005, internal_l1_pricing_multiplier: 0.8, + internal_enforced_l1_gas_price: None, poll_period: 15, }, } @@ -131,6 +147,7 @@ mod tests { let config = r#" ETH_SENDER_SENDER_WAIT_CONFIRMATIONS="1" ETH_SENDER_SENDER_TX_POLL_PERIOD="3" +ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD="3" ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT="3" ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be" ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7" @@ -151,6 +168,7 @@ ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE="4000" ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" 
+ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" "#; set_env(config); diff --git a/core/lib/config/src/configs/object_store.rs b/core/lib/config/src/configs/object_store.rs index cf15852c701c..e61197f7cec1 100644 --- a/core/lib/config/src/configs/object_store.rs +++ b/core/lib/config/src/configs/object_store.rs @@ -4,7 +4,6 @@ use serde::Deserialize; /// Configuration for the object store #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ObjectStoreConfig { - pub service_account_path: String, pub bucket_base_url: String, pub mode: String, pub file_backed_base_path: String, @@ -23,7 +22,6 @@ mod tests { fn expected_config() -> ObjectStoreConfig { ObjectStoreConfig { - service_account_path: "/path/to/service_account.json".to_string(), bucket_base_url: "/base/url".to_string(), mode: "FileBacked".to_string(), file_backed_base_path: "artifacts".to_string(), @@ -33,7 +31,6 @@ mod tests { #[test] fn from_env() { let config = r#" -OBJECT_STORE_SERVICE_ACCOUNT_PATH="/path/to/service_account.json" OBJECT_STORE_BUCKET_BASE_URL="/base/url" OBJECT_STORE_MODE="FileBacked" OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" diff --git a/core/lib/config/src/configs/prover_group.rs b/core/lib/config/src/configs/prover_group.rs index 507ce56f95fa..65ab19af21b6 100644 --- a/core/lib/config/src/configs/prover_group.rs +++ b/core/lib/config/src/configs/prover_group.rs @@ -16,6 +16,7 @@ pub struct ProverGroupConfig { pub group_7_circuit_ids: Vec, pub group_8_circuit_ids: Vec, pub group_9_circuit_ids: Vec, + pub region_read_url: String, } impl ProverGroupConfig { @@ -39,6 +40,10 @@ impl ProverGroupConfig { } } + pub fn is_specialized_group_id(&self, group_id: u8) -> bool { + group_id <= 9 + } + pub fn get_group_id_for_circuit_id(&self, circuit_id: u8) -> Option { let configs = [ &self.group_0_circuit_ids, @@ -78,6 +83,7 @@ mod tests { group_7_circuit_ids: vec![14, 15], group_8_circuit_ids: vec![16, 17], group_9_circuit_ids: vec![3], + region_read_url: 
"http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location".to_string(), } } @@ -92,6 +98,7 @@ mod tests { PROVER_GROUP_GROUP_7_CIRCUIT_IDS="14,15" PROVER_GROUP_GROUP_8_CIRCUIT_IDS="16,17" PROVER_GROUP_GROUP_9_CIRCUIT_IDS="3" + PROVER_GROUP_REGION_READ_URL="http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location" "#; #[test] diff --git a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs index d11bf4427b46..68abe45d6c86 100644 --- a/core/lib/config/src/configs/witness_generator.rs +++ b/core/lib/config/src/configs/witness_generator.rs @@ -3,6 +3,7 @@ use std::time::Duration; // Built-in uses // External uses use serde::Deserialize; + // Local uses use crate::envy_load; @@ -17,33 +18,14 @@ pub struct WitnessGeneratorConfig { pub key_download_url: String, /// Max attempts for generating witness pub max_attempts: u32, - /// Is sampling enabled - pub sampling_enabled: bool, - /// Safe prover lag to process block - pub sampling_safe_prover_lag: Option, - /// Max prover lag to process block - pub sampling_max_prover_lag: Option, + // Percentage of the blocks that gets proven in the range [0.0, 1.0] + // when 0.0 implies all blocks are skipped and 1.0 implies all blocks are proven. + pub blocks_proving_percentage: Option, pub dump_arguments_for_blocks: Vec, -} - -#[derive(Debug, Clone, Copy)] -pub struct SamplingParams { - pub safe_prover_lag: usize, - pub max_prover_lag: usize, -} - -impl SamplingParams { - pub fn calculate_sampling_probability(&self, prover_lag: usize) -> f64 { - let numerator = self.max_prover_lag as f64 - prover_lag as f64; - let denominator = (self.max_prover_lag - self.safe_prover_lag).max(1) as f64; - (numerator / denominator).min(1f64).max(0f64) - } -} - -#[derive(Debug, Clone, Copy)] -pub enum SamplingMode { - Enabled(SamplingParams), - Disabled, + // Optional l1 batch number to process block until(inclusive). 
+ // This parameter is used in case of performing circuit upgrades(VK/Setup keys), + // to not let witness-generator pick new job and finish all the existing jobs with old circuit. + pub last_l1_batch_to_process: Option, } impl WitnessGeneratorConfig { @@ -55,38 +37,26 @@ impl WitnessGeneratorConfig { Duration::from_secs(self.generation_timeout_in_secs as u64) } - pub fn sampling_mode(&self) -> SamplingMode { - match ( - self.sampling_enabled, - self.sampling_safe_prover_lag, - self.sampling_max_prover_lag, - ) { - (true, Some(safe_prover_lag), Some(max_prover_lag)) => { - SamplingMode::Enabled(SamplingParams { - safe_prover_lag, - max_prover_lag, - }) - } - _ => SamplingMode::Disabled, - } + pub fn last_l1_batch_to_process(&self) -> u32 { + self.last_l1_batch_to_process.unwrap_or(u32::MAX) } } #[cfg(test)] mod tests { - use super::*; use crate::configs::test_utils::set_env; + use super::*; + fn expected_config() -> WitnessGeneratorConfig { WitnessGeneratorConfig { generation_timeout_in_secs: 900u16, initial_setup_key_path: "key".to_owned(), key_download_url: "value".to_owned(), max_attempts: 4, - sampling_enabled: true, - sampling_safe_prover_lag: Some(50), - sampling_max_prover_lag: Some(300), + blocks_proving_percentage: Some(30), dump_arguments_for_blocks: vec![2, 3], + last_l1_batch_to_process: None, } } @@ -97,10 +67,8 @@ mod tests { WITNESS_INITIAL_SETUP_KEY_PATH="key" WITNESS_KEY_DOWNLOAD_URL="value" WITNESS_MAX_ATTEMPTS=4 - WITNESS_SAMPLING_ENABLED=true - WITNESS_SAMPLING_SAFE_PROVER_LAG=50 - WITNESS_SAMPLING_MAX_PROVER_LAG=300 WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" + WITNESS_BLOCKS_PROVING_PERCENTAGE="30" "#; set_env(config); let actual = WitnessGeneratorConfig::from_env(); diff --git a/core/lib/config/src/constants/contracts.rs b/core/lib/config/src/constants/contracts.rs index 3fe4349fed0a..5d81ca907407 100644 --- a/core/lib/config/src/constants/contracts.rs +++ b/core/lib/config/src/constants/contracts.rs @@ -73,6 +73,11 @@ pub const 
EVENT_WRITER_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x0d, ]); +pub const BYTECODE_COMPRESSOR_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x0e, +]); + /// The `ecrecover` system contract address. pub const ECRECOVER_PRECOMPILE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/core/lib/config/src/constants/ethereum.rs b/core/lib/config/src/constants/ethereum.rs index d4b099ff865b..61889d8afba5 100644 --- a/core/lib/config/src/constants/ethereum.rs +++ b/core/lib/config/src/constants/ethereum.rs @@ -19,3 +19,6 @@ pub const MAX_L2_TX_GAS_LIMIT: u64 = 80000000; // transactions so that they are able to send at least GUARANTEED_PUBDATA_PER_L1_BATCH bytes per // transaction. pub const MAX_GAS_PER_PUBDATA_BYTE: u64 = MAX_L2_TX_GAS_LIMIT / GUARANTEED_PUBDATA_PER_L1_BATCH; + +// The L1->L2 are required to have the following gas per pubdata byte. +pub const REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE: u64 = 800; diff --git a/core/lib/config/src/constants/fees/mod.rs b/core/lib/config/src/constants/fees/mod.rs index c489b6d9a09a..cbe9f36d0b73 100644 --- a/core/lib/config/src/constants/fees/mod.rs +++ b/core/lib/config/src/constants/fees/mod.rs @@ -37,9 +37,6 @@ pub struct IntrinsicSystemGasConstants { /// Note that it is bigger than 16 to account for potential overhead pub const L1_GAS_PER_PUBDATA_BYTE: u32 = 17; -/// The price the operator spends on 1 gas of computation in wei. 
(0.5 gwei) -pub const FAIR_L2_GAS_PRICE: u64 = 500000000; - /// The amount of pubdata that is strictly guaranteed to be available for a block pub const GUARANTEED_PUBDATA_IN_TX: u32 = 100000; diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml index 2dd7abb55507..61cde99c42bf 100644 --- a/core/lib/contracts/Cargo.toml +++ b/core/lib/contracts/Cargo.toml @@ -12,6 +12,7 @@ categories = ["cryptography"] [dependencies] ethabi = "16.0.0" serde_json = "1.0" +serde = "1.0" zksync_utils = { path = "../utils", version = "1.0" } once_cell = "1.7" hex = "0.4" diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 4ddce9e04b74..a632bee74d26 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -1,13 +1,20 @@ #![allow(clippy::derive_partial_eq_without_eq)] -use ethabi::ethereum_types::U256; +use ethabi::ethereum_types::{H256, U256}; use ethabi::Contract; use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; + use std::fs::{self, File}; use std::path::Path; - use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_utils::bytes_to_be_words; + +#[derive(Debug)] +pub enum ContractLanguage { + Sol, + Yul, +} const ZKSYNC_CONTRACT_FILE: &str = "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/interfaces/IZkSync.sol/IZkSync.json"; @@ -136,11 +143,21 @@ pub fn default_erc20_bytecode() -> Vec { read_bytecode("etc/ERC20/artifacts-zk/contracts/ZkSyncERC20.sol/ZkSyncERC20.json") } -pub fn read_sys_contract_bytecode(directory: &str, name: &str) -> Vec { - read_bytecode(format!( - "etc/system-contracts/artifacts-zk/cache-zk/solpp-generated-contracts/{0}{1}.sol/{1}.json", - directory, name - )) +pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec { + match lang { + ContractLanguage::Sol => { + read_bytecode(format!( + 
"etc/system-contracts/artifacts-zk/cache-zk/solpp-generated-contracts/{0}{1}.sol/{1}.json", + directory, name + )) + }, + ContractLanguage::Yul => { + read_zbin_bytecode(format!( + "etc/system-contracts/contracts/{0}artifacts/{1}.yul/{1}.yul.zbin", + directory, name + )) + } + } } pub fn read_bootloader_code(bootloader_type: &str) -> Vec { @@ -178,26 +195,31 @@ pub fn read_zbin_bytecode(zbin_path: impl AsRef) -> Vec { .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } -pub fn read_bootloader_bytecode() -> Vec { - read_zbin_bytecode("etc/system-contracts/bootloader/artifacts/bootloader/bootloader.yul.zbin") -} - /// Hash of code and code which consists of 32 bytes words -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct SystemContractCode { pub code: Vec, - pub hash: U256, + pub hash: H256, } -pub static PROVED_BLOCK_BOOTLOADER_CODE: Lazy = Lazy::new(|| { - let bytecode = read_proved_block_bootloader_bytecode(); - let hash = hash_bytecode(&bytecode); +#[derive(Debug, Clone)] +pub struct BaseSystemContracts { + pub bootloader: SystemContractCode, + pub default_aa: SystemContractCode, +} - SystemContractCode { - code: bytes_to_be_words(bytecode), - hash: h256_to_u256(hash), +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] +pub struct BaseSystemContractsHashes { + pub bootloader: H256, + pub default_aa: H256, +} + +impl PartialEq for BaseSystemContracts { + fn eq(&self, other: &Self) -> bool { + self.bootloader.hash == other.bootloader.hash + && self.default_aa.hash == other.default_aa.hash } -}); +} pub static PLAYGROUND_BLOCK_BOOTLOADER_CODE: Lazy = Lazy::new(|| { let bytecode = read_playground_block_bootloader_bytecode(); @@ -205,7 +227,7 @@ pub static PLAYGROUND_BLOCK_BOOTLOADER_CODE: Lazy = Lazy::ne SystemContractCode { code: bytes_to_be_words(bytecode), - hash: h256_to_u256(hash), + hash, } }); @@ -215,16 +237,38 @@ pub static ESTIMATE_FEE_BLOCK_CODE: Lazy = Lazy::new(|| { SystemContractCode { 
code: bytes_to_be_words(bytecode), - hash: h256_to_u256(hash), + hash, } }); -pub static DEFAULT_ACCOUNT_CODE: Lazy = Lazy::new(|| { - let bytecode = read_sys_contract_bytecode("", "DefaultAccount"); - let hash = hash_bytecode(&bytecode); +impl BaseSystemContracts { + pub fn load_from_disk() -> Self { + let bytecode = read_proved_block_bootloader_bytecode(); + let hash = hash_bytecode(&bytecode); - SystemContractCode { - code: bytes_to_be_words(bytecode), - hash: h256_to_u256(hash), + let bootloader = SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }; + + let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); + let hash = hash_bytecode(&bytecode); + + let default_aa = SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }; + + BaseSystemContracts { + bootloader, + default_aa, + } } -}); + + pub fn hashes(&self) -> BaseSystemContractsHashes { + BaseSystemContractsHashes { + bootloader: self.bootloader.hash, + default_aa: self.default_aa.hash, + } + } +} diff --git a/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.down.sql b/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.down.sql new file mode 100644 index 000000000000..7f43e3b91b6c --- /dev/null +++ b/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.down.sql @@ -0,0 +1 @@ +DROP index ix_prover_jobs_t1; diff --git a/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.up.sql b/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.up.sql new file mode 100644 index 000000000000..004a58fb5a35 --- /dev/null +++ b/core/lib/dal/migrations/20221222120017_add_index_to_reduce_load_in_prover_jobs.up.sql @@ -0,0 +1 @@ +create index if not exists ix_prover_jobs_t1 on prover_jobs (aggregation_round DESC, l1_batch_number ASC, id ASC) where status in ('queued', 'in_progress', 'failed'); diff --git 
a/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.down.sql b/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.down.sql new file mode 100644 index 000000000000..49001fe21b05 --- /dev/null +++ b/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks DROP bootloader_code_hash; +ALTER TABLE miniblocks DROP default_aa_code_hash; diff --git a/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.up.sql b/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.up.sql new file mode 100644 index 000000000000..9e644a9bc6d6 --- /dev/null +++ b/core/lib/dal/migrations/20221229154428_add-bootloader-aa-code-hash.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks ADD bootloader_code_hash BYTEA; +ALTER TABLE miniblocks ADD default_aa_code_hash BYTEA; diff --git a/core/lib/dal/migrations/20230215114053_verification-request-is-system.down.sql b/core/lib/dal/migrations/20230215114053_verification-request-is-system.down.sql new file mode 100644 index 000000000000..cab87d155429 --- /dev/null +++ b/core/lib/dal/migrations/20230215114053_verification-request-is-system.down.sql @@ -0,0 +1 @@ +ALTER TABLE contract_verification_requests DROP COLUMN IF EXISTS is_system; diff --git a/core/lib/dal/migrations/20230215114053_verification-request-is-system.up.sql b/core/lib/dal/migrations/20230215114053_verification-request-is-system.up.sql new file mode 100644 index 000000000000..2186c2997d57 --- /dev/null +++ b/core/lib/dal/migrations/20230215114053_verification-request-is-system.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE contract_verification_requests + ADD COLUMN IF NOT EXISTS is_system BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.down.sql b/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.down.sql new file mode 100644 index 000000000000..78352a1beb60 --- /dev/null +++ 
b/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE gpu_prover_queue + DROP COLUMN IF EXISTS region; diff --git a/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.up.sql b/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.up.sql new file mode 100644 index 000000000000..b4eabf6f6d39 --- /dev/null +++ b/core/lib/dal/migrations/20230221112209_add_region_column_in_gpu_prover_queue.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE gpu_prover_queue + ADD COLUMN IF NOT EXISTS region TEXT + NOT NULL + DEFAULT('prior to enabling multi-region'); diff --git a/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.down.sql b/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.down.sql new file mode 100644 index 000000000000..d85c1c01ee8e --- /dev/null +++ b/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE gpu_prover_queue DROP CONSTRAINT IF EXISTS gpu_prover_queue_pkey; + +ALTER TABLE gpu_prover_queue ADD CONSTRAINT gpu_prover_queue_pkey PRIMARY KEY (instance_host, instance_port); diff --git a/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.up.sql b/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.up.sql new file mode 100644 index 000000000000..597c295e6c75 --- /dev/null +++ b/core/lib/dal/migrations/20230221113734_adjust_primary_key_in_gpu_prover_queue.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE gpu_prover_queue DROP CONSTRAINT IF EXISTS gpu_prover_queue_pkey; + +ALTER TABLE gpu_prover_queue ADD CONSTRAINT gpu_prover_queue_pkey PRIMARY KEY (instance_host, instance_port, region); diff --git a/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.down.sql b/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.down.sql new file mode 100644 index 
000000000000..037c7aaa64fa --- /dev/null +++ b/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS prover_jobs_circuit_type_and_status_index; diff --git a/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.up.sql b/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.up.sql new file mode 100644 index 000000000000..8f0bbaf51b3c --- /dev/null +++ b/core/lib/dal/migrations/20230228095320_add_circuit_type_status_index_in_prover_jobs.up.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS prover_jobs_circuit_type_and_status_index ON prover_jobs (circuit_type, status); diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index ac5ce1d5900f..d4a3342160a8 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -27,26 +27,6 @@ }, "query": "\n SELECT (SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1) as \"initial_write_l1_batch_number?\",\n (SELECT miniblocks.l1_batch_number FROM miniblocks WHERE number = $2) as \"current_l1_batch_number?\"\n " }, - "00bf0b01e0ee03cd3fb8f1c88ac42c535532c55a15d23c57c32561cdffd91455": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT l1_batch_number FROM node_aggregation_witness_jobs\n WHERE length(leaf_layer_subqueues) <> 0\n OR length(aggregation_outputs) <> 0\n LIMIT $1;\n " - }, "01189407fab9be050ae75249f75b9503343500af700f00721e295871fa969172": { "describe": { "columns": [ @@ -135,87 +115,6 @@ }, "query": "\n SELECT l1_address, l2_address, symbol, name, decimals, usd_price\n FROM tokens\n WHERE l2_address = $1\n " }, - "026012668b1efe74d34e84d24b3c23a462e54846ddf854cae5eeac923d2468be": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - 
}, - { - "name": "merkle_tree_paths", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "created_at", - "ordinal": 2, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 3, - "type_info": "Timestamp" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "time_taken", - "ordinal": 5, - "type_info": "Time" - }, - { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "error", - "ordinal": 7, - "type_info": "Varchar" - }, - { - "name": "attempts", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "merkel_tree_paths_blob_url", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 10, - "type_info": "Bool" - } - ], - "nullable": [ - false, - true, - false, - false, - false, - false, - true, - true, - false, - true, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int4" - ] - } - }, - "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " - }, "03a34f0fd82bed22f14c5b36554bb958d407e9724fa5ea5123edc3c6607e545c": { "describe": { "columns": [ @@ -308,6 +207,24 @@ }, "query": "\n WITH events_select AS (\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE miniblock_number > $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n )\n SELECT miniblocks.hash as \"block_hash?\",\n address as \"address!\", topic1 as \"topic1!\", topic2 as \"topic2!\", topic3 as \"topic3!\", topic4 as 
\"topic4!\", value as \"value!\",\n miniblock_number as \"miniblock_number!\", miniblocks.l1_batch_number as \"l1_batch_number?\", tx_hash as \"tx_hash!\",\n tx_index_in_block as \"tx_index_in_block!\", event_index_in_block as \"event_index_in_block!\", event_index_in_tx as \"event_index_in_tx!\"\n FROM events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, + "077913dcb33f255fad3f6d81a46a5acad9074cf5c03216430ca1a959825a057a": { + "describe": { + "columns": [ + { + "name": "max", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MAX(l1_batch_number) FROM witness_inputs\n WHERE merkel_tree_paths_blob_url IS NOT NULL\n " + }, "07f14f401347d74b8bb3595f5ec75e6379a8af0e2e4cbd5ee78d70583925d60b": { "describe": { "columns": [ @@ -825,38 +742,6 @@ }, "query": "UPDATE transactions SET in_mempool = FALSE FROM UNNEST ($1::bytea[]) AS s(address) WHERE transactions.in_mempool = TRUE AND transactions.initiator_address = s.address" }, - "0d4dff244f0ea6685f9c2d9a7f639c9935ce30c7dfd2bf61784b6902aa1a7790": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "basic_circuits_blob_url", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "basic_circuits_inputs_blob_url", - "ordinal": 2, - "type_info": "Text" - } - ], - "nullable": [ - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '2 days'\n LIMIT $1;\n " - }, "0d99b4015b29905862991e4f1a44a1021d48f50e99cb1701e7496ce6c3e15dc6": { "describe": 
{ "columns": [ @@ -875,18 +760,6 @@ }, "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" }, - "0dec12063dac83663f109ff19174ccb53b7f1e710679e65f96d86d90887b848a": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET leaf_layer_subqueues='',\n aggregation_outputs=''\n WHERE l1_batch_number = ANY($1);\n " - }, "0f8a603899280c015b033c4160bc064865103e9d6d63a369f07a8e5d859a7b14": { "describe": { "columns": [ @@ -1101,6 +974,110 @@ }, "query": "\n WITH sl AS (\n SELECT * FROM storage_logs\n WHERE storage_logs.address = $1 AND storage_logs.tx_hash = $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n )\n SELECT\n transactions.hash as tx_hash,\n transactions.index_in_block as index_in_block,\n transactions.l1_batch_tx_index as l1_batch_tx_index,\n transactions.miniblock_number as block_number,\n transactions.error as error,\n transactions.effective_gas_price as effective_gas_price,\n transactions.initiator_address as initiator_address,\n transactions.data->'to' as \"transfer_to?\",\n transactions.data->'contractAddress' as \"execute_contract_address?\",\n transactions.tx_format as \"tx_format?\",\n transactions.refunded_gas as refunded_gas,\n transactions.gas_limit as gas_limit,\n miniblocks.hash as \"block_hash?\",\n miniblocks.l1_batch_number as \"l1_batch_number?\",\n sl.key as \"contract_address?\"\n FROM transactions\n LEFT JOIN miniblocks\n ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN sl\n ON sl.value != $3\n WHERE transactions.hash = $2\n " }, + "1c1a4cdf476de4f4cc83a31151fc4c407b93b53e2cd995f8bb5222d0a3c38c47": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "l1_tx_count", + "ordinal": 2, + "type_info": "Int4" + }, + { + 
"name": "l2_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 13, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 14, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n l1_batches.l1_gas_price,\n l1_batches.l2_fair_gas_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as 
execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE l1_batches.number = $1\n " + }, "1d26bb777f103d83523d223071eaa8391049c0efec9406e37003ac08065d389f": { "describe": { "columns": [], @@ -1704,20 +1681,37 @@ }, "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n " }, - "28b5117758d0e82672351c0cc2dbbfbe7b27e785d7a3d7e8d3ddde76b6aa2974": { + "252c1398bf08802e9dc038f7c9d95cc9d56cbf760d7de5a48f014478850daede": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "scheduler_witness_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "final_node_aggregations_blob_url", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + true + ], "parameters": { "Left": [ - "Text", - "Int4", - "Int4", - "Int2" + "Int8" ] } }, - "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, now(), now())\n ON CONFLICT(instance_host, instance_port)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, updated_at=now()" + "query": "\n SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND updated_at < 
NOW() - INTERVAL '30 days'\n AND scheduler_witness_blob_url is NOT NULL\n AND final_node_aggregations_blob_url is NOT NULL\n LIMIT $1;\n " }, "2911797974d340cc75bb628866c24f77665e3dca3954f0c83860da488265f5c6": { "describe": { @@ -1848,6 +1842,31 @@ }, "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contracts_verification_info\n WHERE address = $1\n " }, + "2f5f9182c87944bf7856ee8e6036e49118477c62d3085c4bab32150f268dfa58": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bool", + "Bytea", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int8" + ] + } + }, + "query": "\n UPDATE l1_batches SET\n hash = $1, merkle_root_hash = $2, commitment = $3, \n compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6,\n l2_l1_merkle_root = $7, zkporter_is_available = $8, \n parent_hash = $9, rollup_last_leaf_index = $10, \n aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13,\n updated_at = NOW()\n WHERE number = $14 AND hash IS NULL\n " + }, "2ff4a13a75537cc30b2c3d52d3ef6237850150e4a4569adeaa4da4a9ac5bc689": { "describe": { "columns": [ @@ -1869,18 +1888,6 @@ }, "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2" }, - "30a51f6a7d0146fc74d411e5ee1cee44550251c0d8e814891984ecc462bc0bcb": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations=''\n WHERE l1_batch_number = ANY($1);\n " - }, "3221b722354995f0705ceaf913a48aa092129bb4ff561a1104196f5b25192576": { "describe": { "columns": [ @@ -1917,27 +1924,7 @@ }, "query": "SELECT MAX(number) as \"number\" FROM miniblocks" }, - "3594189e579d00c5477476e999aef22fe0dff97c753db118270285e26a9e4366": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": 
"Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT l1_batch_number FROM leaf_aggregation_witness_jobs\n WHERE length(basic_circuits) <> 0\n OR length(basic_circuits_inputs) <> 0\n LIMIT $1;\n " - }, - "35ef2dc2ac64f27e24679288e5a4f56ad03369cd9771cb4b2bc3dc17906d21e8": { + "36c483775b604324eacd7e5aac591b927cc32abb89fe1b0c5cf4b0383e9bd443": { "describe": { "columns": [ { @@ -1967,7 +1954,7 @@ ] } }, - "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '2 days'\n LIMIT $1;\n " + "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, "393345441797999e9f11b8b5ddce0b64356e1e167056d7f76ef6dfffd3534607": { "describe": { @@ -2249,6 +2236,23 @@ }, "query": "\n SELECT transactions.*,\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = 
execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, + "3b0bfc7445faaa87f6cabb68419ebff995120d65db3a4def70d998507e699811": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "ByteaArray", + "Int4Array", + "VarcharArray", + "JsonbArray", + "Int8Array" + ] + } + }, + "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::bigint[]) as refunded_gas\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " + }, "3c582aeed32235ef175707de412a9f9129fad6ea5e87ebb85f68e20664b0da46": { "describe": { "columns": [], @@ -2263,6 +2267,33 @@ }, "query": "\n UPDATE transactions\n SET \n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = now()\n FROM\n (SELECT\n UNNEST($1::int[]) AS l1_batch_tx_index,\n UNNEST($2::bytea[]) AS hash\n ) AS data_table\n WHERE transactions.hash=data_table.hash \n " }, + "3cb9fd0e023940d4e30032a9b0528a95513468ebf701557153c5f1417bdb847f": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Text", + "Text", + "Text", + "Text", + "Bool", + "Bytea", + "Bool" + ] + } + }, + "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n compiler_zksolc_version,\n compiler_solc_version,\n optimization_used,\n constructor_arguments,\n is_system,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', 
now(), now())\n RETURNING id\n " + }, "3d41f05e1d5c5a74e0605e66fe08e09f14b8bf0269e5dcde518aa08db92a3ea0": { "describe": { "columns": [], @@ -2309,33 +2340,33 @@ }, "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" }, - "3e10488214ce80491123183171bef6b7fd2fbd89a9d2a39230efda23b5cbe65c": { + "3f86b7cb793dd8849af45ff3de4eabb80082a1cf8b213be607e6e13bb3d6710d": { "describe": { "columns": [ { - "name": "instance_host", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Inet" + "type_info": "Int8" }, { - "name": "instance_port", + "name": "basic_circuits", "ordinal": 1, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "instance_status", + "name": "basic_circuits_inputs", "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "number_of_basic_circuits", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "updated_at", + "name": "status", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Text" }, { "name": "processing_started_at", @@ -2343,19 +2374,44 @@ "type_info": "Timestamp" }, { - "name": "queue_free_slots", + "name": "time_taken", "ordinal": 6, - "type_info": "Int4" + "type_info": "Time" }, { - "name": "queue_capacity", + "name": "error", "ordinal": 7, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "specialized_prover_group_id", + "name": "created_at", "ordinal": 8, - "type_info": "Int2" + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "attempts", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "basic_circuits_blob_url", + "ordinal": 11, + "type_info": "Text" + }, + { + "name": "basic_circuits_inputs_blob_url", + "ordinal": 12, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 13, + "type_info": "Bool" } ], "nullable": [ @@ -2367,56 +2423,35 @@ true, true, true, - true + false, + false, + 
false, + true, + true, + false ], "parameters": { "Left": [ "Interval", - "Int2" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE (instance_host, instance_port) in (\n SELECT instance_host, instance_port\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " - }, - "40a1960bf6dffd5711892edfc2a73c8f2db44aefe1436882dc3bee8447bb67bd": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", + "Int4", "Int8" ] } }, - "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT storage_logs_dedup.hashed_key, storage_logs_dedup.l1_batch_number, now(), now()\n FROM storage_logs_dedup\n WHERE l1_batch_number BETWEEN $1 AND $2\n AND is_write = TRUE\n ON CONFLICT DO NOTHING\n " + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " }, - "40e0d88efb9c9ea0a8630df05d0a9981a13020ad69fafa42358e857fb4f1a93a": { + "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bool", - "Bytea", - "Bytea", "Int8", - 
"Bytea", - "Bytea", - "Bytea", - "Int8" + "Text" ] } }, - "query": "\n UPDATE l1_batches SET\n hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4,\n compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7,\n l2_l1_merkle_root = $8, zkporter_is_available = $9, \n bootloader_code_hash = $10, parent_hash = $11, rollup_last_leaf_index = $12, \n aux_data_hash = $13, pass_through_data_hash = $14, meta_parameters_hash = $15,\n updated_at = NOW()\n WHERE number = $16 AND hash IS NULL\n " + "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1 AND status != 'queued'\n " }, "41913b02b13a0dad87268c5e0d673d9f04d5207ab6a48b63004e6c3ed07b93bc": { "describe": { @@ -2701,6 +2736,21 @@ }, "query": "\n UPDATE l1_batches\n SET predicted_commit_gas_cost = $2, updated_at = now()\n WHERE number = $1\n " }, + "433d5da4d72150cf2c1e1007ee3ff51edfa51924f4b662b8cf382f06e60fd228": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Text", + "Text" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " + }, "438ea2edcf2e5ec1ec8b05da4d634e914e4d892441b6f2926f0926c7c90e33d1": { "describe": { "columns": [], @@ -2735,68 +2785,27 @@ }, "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM\n (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" }, - "492488cc22bdc88e6fcd9017fdf034b8325ca517b4302ab234e93b38cc3225b9": { + "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + 
"nullable": [ + false + ], "parameters": { "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "Bool", - "Bytea", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Numeric", - "Int8", - "Int8" + "Interval" ] } }, - "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count,\n timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data,\n predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost,\n initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, now(), now())\n " + "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" }, - "4aafb16cb2b1cc8fa62f3065eefb4a4fa075f1d9c5fd9e61010b9d25d3532bcc": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET basic_circuits='',\n basic_circuits_inputs=''\n WHERE l1_batch_number = ANY($1);\n " - }, - "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { - "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Interval" - ] - } - }, - "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" - }, - "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { + "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { "describe": { "columns": [ { @@ -2906,6 +2915,38 @@ }, "query": "\n UPDATE witness_inputs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = 
ANY($1);\n " }, + "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int4Array", + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "NumericArray", + "Int4Array", + "Int4Array", + "VarcharArray", + "NumericArray", + "JsonbArray", + "ByteaArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "Int8" + ] + } + }, + "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) 
AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " + }, "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { "describe": { "columns": [], @@ -3256,93 +3297,58 @@ }, "query": "SELECT COUNT(*) as \"count!\" FROM transactions\n WHERE miniblock_number > $1 AND miniblock_number IS NOT NULL" }, - "529fd4515b6c71592204e747d5dca9cb98d8863b354e35b8ac3486746fb8b49a": { + "52602518095b2a45fadab7b76218acb6964b416a103be2a3b37b3dac4a970c14": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "timestamp", "ordinal": 1, "type_info": "Int8" }, { - "name": "circuit_type", + "name": "hash", "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "prover_input", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "status", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "error", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Text" + "type_info": "Numeric" }, { - "name": "processing_started_at", + "name": "l1_gas_price", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "created_at", + "name": "l2_fair_gas_price", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "updated_at", + "name": "bootloader_code_hash", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "time_taken", + "name": "default_aa_code_hash", "ordinal": 9, - "type_info": "Time" - }, - { - "name": "aggregation_round", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "result", - "ordinal": 11, "type_info": 
"Bytea" - }, - { - "name": "sequence_number", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "attempts", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "circuit_input_blob_url", - "ordinal": 14, - "type_info": "Text" - }, - { - "name": "proccesed_by", - "ordinal": 15, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 16, - "type_info": "Bool" } ], "nullable": [ @@ -3351,44 +3357,19 @@ false, false, false, - true, - true, - false, - false, false, false, - true, - false, false, true, - true, - false + true ], "parameters": { "Left": [ - "Interval", - "Int4", - "TextArray" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($3)\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " - }, - "5317ed0be137e9ed32abcd41486f53937b8508f5c6478523aa18826518e5f0ab": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "ByteaArray", - "Int4Array", - "VarcharArray", - "JsonbArray" + "Int8" ] } }, - "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " + "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, 
l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n WHERE number = $1\n " }, "541d22a9ffe9c7b31833f203af0820cca4513d7a9e6feed7313757674c30e667": { "describe": { @@ -3721,6 +3702,36 @@ }, "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, + "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "circuit_type!", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "status!", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + null, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " + }, "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { "describe": { "columns": [], @@ -3810,6 +3821,35 @@ }, "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " }, + "5a47a5a007ca26497d7015f21d2bf31785ec8e061dbd4c3e847c3b53b40269c4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Jsonb", + "Int8", + "Numeric", + "Numeric", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" + ] + } + }, + "query": "\n INSERT INTO 
transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n " + }, "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { "describe": { "columns": [ @@ -3907,6 +3947,30 @@ }, "query": "INSERT INTO factory_deps\n (bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT u.bytecode_hash, u.bytecode, $3, now(), now()\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n " }, + "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { + "describe": { + "columns": [ + { + "name": "l1_batch_number!", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "aggregation_round", + "ordinal": 1, + "type_info": "Int4" + } + ], + "nullable": [ + null, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " + }, "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { "describe": { "columns": [ @@ -4061,1006 +4125,730 @@ }, "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "60d8df86205f043af69ff5daee1db8d4d20805bf8dfeddc256ff616e36502cc8": { + 
"604b41258da640307989571e014e8ccb4f457bba0caedcb42dc1065fc90f7950": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT id FROM prover_jobs\n WHERE length(prover_input) <> 0\n LIMIT $1;\n " - }, - "61f4f5ef369b2435732af17091493876301e3e59b68d6817fe0053c7da89291e": { - "describe": { - "columns": [ + }, { - "name": "max_nonce?", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(nonce) as \"max_nonce?\" FROM eth_txs" - }, - "622735d9d8a0ab3f607b239740a0a2e323cca7026556c4fff95d06ef5ae9d9ba": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Numeric", - "Int8", - "Int8", - "Int8" - ] - } - }, - "query": "\n INSERT INTO miniblocks (\n number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, now(), now())\n " - }, - "623ce93bba053fe78a1e254db5e421c5b51fbafcda1fc5c17eaab3f5fe233122": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Text", - "Text", - "Text", - "Text", - "Bool", - "Bytea" - ] - } - }, - "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n compiler_zksolc_version,\n compiler_solc_version,\n optimization_used,\n constructor_arguments,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n RETURNING id\n " - }, - "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ 
- "Int4" - ] - } - }, - "query": "DELETE FROM eth_txs_history\n WHERE id = $1" - }, - "63616acc2c415f4c8d650a96fd5481a609436a94666d65363eb06808da8da4b8": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1" - }, - "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { - "describe": { - "columns": [ - { - "name": "status", - "ordinal": 0, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "compilation_errors", - "ordinal": 2, - "type_info": "Jsonb" - } - ], - "nullable": [ - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " - }, - "65bf55ff4ac5c4ac60bedd7c5b39d82f6e8793859749a7b6ab56121f623ed840": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "commit_gas?", - "ordinal": 1, + "name": "timestamp", + "ordinal": 1, "type_info": "Int8" }, { - "name": "commit_base_gas_price?", + "name": "is_finished", "ordinal": 2, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "commit_priority_gas_price?", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "prove_gas?", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "prove_base_gas_price?", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "prove_priority_gas_price?", + "name": "bloom", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "execute_gas?", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Int8" + "type_info": 
"ByteaArray" }, { - "name": "execute_base_gas_price?", + "name": "hash", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "execute_priority_gas_price?", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int8" - } - ], - "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT\n l1_batches.number,\n commit_tx_data.gas_used as \"commit_gas?\",\n commit_tx.base_fee_per_gas as \"commit_base_gas_price?\",\n commit_tx.priority_fee_per_gas as \"commit_priority_gas_price?\",\n prove_tx_data.gas_used as \"prove_gas?\",\n prove_tx.base_fee_per_gas as \"prove_base_gas_price?\",\n prove_tx.priority_fee_per_gas as \"prove_priority_gas_price?\",\n execute_tx_data.gas_used as \"execute_gas?\",\n execute_tx.base_fee_per_gas as \"execute_base_gas_price?\",\n execute_tx.priority_fee_per_gas as \"execute_priority_gas_price?\"\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as commit_tx_data\n ON (l1_batches.eth_commit_tx_id = commit_tx_data.id)\n LEFT JOIN eth_txs_history as prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as prove_tx_data\n ON (l1_batches.eth_prove_tx_id = prove_tx_data.id)\n LEFT JOIN eth_txs_history as execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as execute_tx_data\n ON (l1_batches.eth_execute_tx_id = execute_tx_data.id)\n WHERE l1_batches.number = $1\n " - }, - "66072439a0436906c6273ffdbadca8837f23677f4d47c42cd9053e952789f26b": { - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" }, { - "name": 
"basic_circuits", - "ordinal": 1, + "name": "compressed_write_logs", + "ordinal": 11, "type_info": "Bytea" }, { - "name": "basic_circuits_inputs", - "ordinal": 2, + "name": "compressed_contracts", + "ordinal": 12, "type_info": "Bytea" }, { - "name": "number_of_basic_circuits", - "ordinal": 3, + "name": "eth_prove_tx_id", + "ordinal": 13, "type_info": "Int4" }, { - "name": "status", - "ordinal": 4, - "type_info": "Text" + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" }, { - "name": "processing_started_at", - "ordinal": 5, + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, "type_info": "Timestamp" }, { - "name": "time_taken", - "ordinal": 6, - "type_info": "Time" + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" }, { - "name": "error", - "ordinal": 7, - "type_info": "Text" + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" }, { - "name": "created_at", - "ordinal": 8, - "type_info": "Timestamp" + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" }, { - "name": "updated_at", - "ordinal": 9, - "type_info": "Timestamp" + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" }, { - "name": "attempts", - "ordinal": 10, - "type_info": "Int4" + "name": "predicted_commit_gas_cost", + "ordinal": 21, + "type_info": "Int8" }, { - "name": "basic_circuits_blob_url", - "ordinal": 11, - "type_info": "Text" + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" }, { - "name": "basic_circuits_inputs_blob_url", - "ordinal": 12, - "type_info": "Text" + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" }, { - "name": "is_blob_cleaned", - "ordinal": 13, - "type_info": "Bool" - } - ], - "nullable": [ - false, - false, + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": 
"Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 27, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, + "type_info": "Int4" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 36, + "type_info": "Int8" + }, + { + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" + } + ], + "nullable": [ false, false, false, + false, + false, + false, + false, + false, + true, + true, + true, true, true, true, + true, + true, + false, + false, + true, + false, + false, + false, false, false, false, + false, + true, + true, + true, + true, + true, + true, + true, true, true, + false, + false, + true, + true, + true, + false, + false, false ], "parameters": { "Left": [ - "Interval", - "Int4" + "Bytea", + "Bytea", + "Int8" ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), 
processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " + "query": "SELECT * FROM l1_batches\n WHERE eth_commit_tx_id IS NULL\n AND number != 0\n AND bootloader_code_hash = $1 AND default_aa_code_hash = $2\n AND commitment IS NOT NULL\n ORDER BY number LIMIT $3" }, - "66a3761aec92aa8794e55ddd8299879e915e8ef84f8be9ebca9881c77438d2c8": { + "61f4f5ef369b2435732af17091493876301e3e59b68d6817fe0053c7da89291e": { "describe": { "columns": [ { - "name": "value", + "name": "max_nonce?", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { - "Left": [ - "Bytea", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT value FROM storage_logs\n WHERE hashed_key = $1 AND miniblock_number <= $2\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n " + "query": "SELECT MAX(nonce) as \"max_nonce?\" FROM eth_txs" }, - "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Time", - "Bytea", - "Text", - "Int8" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " - }, - "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { - "describe": { - "columns": [ - { - "name": "total_transactions!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea" + "Int4" ] } }, - "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE 
contract_address = $1\n " + "query": "DELETE FROM eth_txs_history\n WHERE id = $1" }, - "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "63616acc2c415f4c8d650a96fd5481a609436a94666d65363eb06808da8da4b8": { "describe": { - "columns": [ - { - "name": "l2_to_l1_logs", - "ordinal": 0, - "type_info": "ByteaArray" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" + "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1" }, - "6ae4738857a3dc19860b8dc61b75790dee0030d84438bcc311e917cb1a076289": { + "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { "describe": { "columns": [ { - "name": "proof", + "name": "status", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "aggregation_result_coords", + "name": "error", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Text" + }, + { + "name": "compilation_errors", + "ordinal": 2, + "type_info": "Jsonb" } ], "nullable": [ + false, true, true ], "parameters": { "Left": [ - "Int8", "Int8" ] } }, - "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n AND scheduler_witness_jobs.status = 'successful'\n " + "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " }, - "6c81c5a55d595d0790ac20ca202ff3083b0677c47872f2eb1c65e568dd7c156a": { + "65bf55ff4ac5c4ac60bedd7c5b39d82f6e8793859749a7b6ab56121f623ed840": { "describe": { "columns": [ { - "name": 
"miniblock_number", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "log_index_in_miniblock", + "name": "commit_gas?", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "log_index_in_tx", + "name": "commit_base_gas_price?", "ordinal": 2, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "commit_priority_gas_price?", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "block_hash", + "name": "prove_gas?", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l1_batch_number?", + "name": "prove_base_gas_price?", "ordinal": 5, "type_info": "Int8" }, { - "name": "shard_id", + "name": "prove_priority_gas_price?", "ordinal": 6, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "is_service", + "name": "execute_gas?", "ordinal": 7, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "tx_index_in_miniblock", + "name": "execute_base_gas_price?", "ordinal": 8, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_index_in_l1_batch", + "name": "execute_priority_gas_price?", "ordinal": 9, - "type_info": "Int4" - }, - { - "name": "sender", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "value", - "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ false, + true, false, false, - false, - null, - null, - false, - false, - false, + true, false, false, + true, false, false ], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "\n SELECT\n miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value\n FROM l2_to_l1_logs\n WHERE tx_hash = $1\n ORDER BY log_index_in_tx ASC\n " + "query": "\n SELECT\n l1_batches.number,\n commit_tx_data.gas_used as \"commit_gas?\",\n 
commit_tx.base_fee_per_gas as \"commit_base_gas_price?\",\n commit_tx.priority_fee_per_gas as \"commit_priority_gas_price?\",\n prove_tx_data.gas_used as \"prove_gas?\",\n prove_tx.base_fee_per_gas as \"prove_base_gas_price?\",\n prove_tx.priority_fee_per_gas as \"prove_priority_gas_price?\",\n execute_tx_data.gas_used as \"execute_gas?\",\n execute_tx.base_fee_per_gas as \"execute_base_gas_price?\",\n execute_tx.priority_fee_per_gas as \"execute_priority_gas_price?\"\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as commit_tx_data\n ON (l1_batches.eth_commit_tx_id = commit_tx_data.id)\n LEFT JOIN eth_txs_history as prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as prove_tx_data\n ON (l1_batches.eth_prove_tx_id = prove_tx_data.id)\n LEFT JOIN eth_txs_history as execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as execute_tx_data\n ON (l1_batches.eth_execute_tx_id = execute_tx_data.id)\n WHERE l1_batches.number = $1\n " }, - "6de96eb86301418de9a4342cd66447afd6eb42759d36e164e36adddbd42e98e2": { + "66a3761aec92aa8794e55ddd8299879e915e8ef84f8be9ebca9881c77438d2c8": { "describe": { "columns": [ { - "name": "number", + "name": "value", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" } ], "nullable": [ false ], "parameters": { - "Left": [] + "Left": [ + "Bytea", + "Int8" + ] } }, - "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE execute_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + "query": "\n SELECT value FROM storage_logs\n WHERE hashed_key = $1 AND miniblock_number <= $2\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n " }, - 
"6ebe0d6a315050d72ffead2dd695f0ba1926a3f4a1ed56b3f291d0f41b72c4d4": { + "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Bytea", + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " + }, + "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { "describe": { "columns": [ { - "name": "hashed_key!", + "name": "total_transactions!", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "value?", - "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - null, null ], "parameters": { "Left": [ - "ByteaArray", + "Bytea" + ] + } + }, + "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " + }, + "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { + "describe": { + "columns": [ + { + "name": "l2_to_l1_logs", + "ordinal": 0, + "type_info": "ByteaArray" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ "Int8" ] } }, - "query": "\n SELECT u.hashed_key as \"hashed_key!\",\n (SELECT value FROM storage_logs\n WHERE hashed_key = u.hashed_key AND miniblock_number < $2\n ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\"\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n " + "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" }, - "6f9edffc50202b888d12f80e57a2a346d865e522aa5a02fe3fcfa155406227a4": { + "69c3e2cfece5cb9f6989f5cbbea36af2a92addcdb41082541ea41b46fdd0ea1f": { "describe": { "columns": [ { - "name": "hash", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "timestamp", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "full_fee", + "name": "is_finished", "ordinal": 2, - 
"type_info": "Numeric" + "type_info": "Bool" }, { - "name": "layer_2_tip_fee", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "initiator_address", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "nonce", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "signature", + "name": "bloom", "ordinal": 6, "type_info": "Bytea" }, { - "name": "input", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Bytea" + "type_info": "ByteaArray" }, { - "name": "data", + "name": "hash", "ordinal": 8, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "received_at", + "name": "parent_hash", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "priority_op_id", + "name": "commitment", "ordinal": 10, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_batch_number", + "name": "compressed_write_logs", "ordinal": 11, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "index_in_block", + "name": "compressed_contracts", "ordinal": 12, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "error", + "name": "eth_prove_tx_id", "ordinal": 13, - "type_info": "Varchar" + "type_info": "Int4" }, { - "name": "gas_limit", + "name": "eth_commit_tx_id", "ordinal": 14, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "gas_per_storage_limit", + "name": "eth_execute_tx_id", "ordinal": 15, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "gas_per_pubdata_limit", + "name": "created_at", "ordinal": 16, - "type_info": "Numeric" + "type_info": "Timestamp" }, { - "name": "tx_format", + "name": "updated_at", "ordinal": 17, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "created_at", + "name": "merkle_root_hash", "ordinal": 18, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "l2_to_l1_logs", 
"ordinal": 19, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "execution_info", + "name": "l2_to_l1_messages", "ordinal": 20, - "type_info": "Jsonb" + "type_info": "ByteaArray" }, { - "name": "contract_address", + "name": "predicted_commit_gas_cost", "ordinal": 21, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "in_mempool", + "name": "predicted_prove_gas_cost", "ordinal": 22, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "l1_block_number", + "name": "predicted_execute_gas_cost", "ordinal": 23, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "value", + "name": "initial_bootloader_heap_content", "ordinal": 24, - "type_info": "Numeric" + "type_info": "Jsonb" }, { - "name": "paymaster", + "name": "used_contract_hashes", "ordinal": 25, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "paymaster_input", + "name": "compressed_initial_writes", "ordinal": 26, "type_info": "Bytea" }, { - "name": "max_fee_per_gas", + "name": "compressed_repeated_writes", "ordinal": 27, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "max_priority_fee_per_gas", + "name": "l2_l1_compressed_messages", "ordinal": 28, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "effective_gas_price", + "name": "l2_l1_merkle_root", "ordinal": 29, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "miniblock_number", + "name": "gas_per_pubdata_byte_in_block", "ordinal": 30, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "l1_batch_tx_index", + "name": "rollup_last_leaf_index", "ordinal": 31, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "refunded_gas", + "name": "zkporter_is_available", "ordinal": 32, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "l1_tx_mint", + "name": "bootloader_code_hash", "ordinal": 33, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "l1_tx_refund_recipient", + "name": "default_aa_code_hash", "ordinal": 34, "type_info": 
"Bytea" - } - ], - "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - false, - true, - false, - false, - false, - true, - true, - true, - true, - true, - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8", - "Numeric", - "Numeric" - ] - } - }, - "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" - }, - "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int8" - ] - } - }, - "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " - }, - "734fc9cc1ffe10a6c6b56150c0681b6b2757d14b2ea04a289abb1de64dffb172": { - "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" }, { - "name": "is_priority", - "ordinal": 1, - "type_info": "Bool" - }, - { - "name": "full_fee", - "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, + "name": "base_fee_per_gas", + "ordinal": 35, "type_info": "Numeric" }, { - "name": "initiator_address", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, + "name": "gas_per_pubdata_limit", + "ordinal": 36, "type_info": "Int8" }, { - "name": "signature", - "ordinal": 6, + "name": "aux_data_hash", + "ordinal": 37, "type_info": "Bytea" }, { - "name": "input", - 
"ordinal": 7, + "name": "pass_through_data_hash", + "ordinal": 38, "type_info": "Bytea" }, { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, + "name": "meta_parameters_hash", + "ordinal": 39, "type_info": "Bytea" }, { - "name": "in_mempool", - "ordinal": 22, + "name": "skip_proof", + "ordinal": 40, "type_info": "Bool" }, { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, + "name": "l1_gas_price", + "ordinal": 41, "type_info": "Int8" }, { - "name": 
"l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, + "name": "l2_fair_gas_price", + "ordinal": 42, "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "block_hash?", - "ordinal": 35, - "type_info": "Bytea" - }, - { - "name": "eth_commit_tx_hash?", - "ordinal": 36, - "type_info": "Text" - }, - { - "name": "eth_prove_tx_hash?", - "ordinal": 37, - "type_info": "Text" - }, - { - "name": "eth_execute_tx_hash?", - "ordinal": 38, - "type_info": "Text" } ], "nullable": [ false, false, - true, - true, false, - true, - true, - true, + false, + false, + false, false, false, true, @@ -5073,10 +4861,11 @@ true, false, false, - false, true, false, - true, + false, + false, + false, false, false, false, @@ -5085,109 +4874,122 @@ true, true, true, - false, + true, + true, true, true, false, false, + true, + true, + true, + false, false, false ], "parameters": { "Left": [ - "Bytea" + "Float8", + "Int8" ] } }, - "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + "query": "SELECT l1_batches.* FROM l1_batches JOIN 
eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 ORDER BY number LIMIT $2" }, - "75273db544f363b2c75bb7b579ba72fbf9447dd76182159edc40a48b32a9f738": { + "6ae4738857a3dc19860b8dc61b75790dee0030d84438bcc311e917cb1a076289": { "describe": { "columns": [ { - "name": "id", + "name": "proof", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_batch_number", + "name": "aggregation_result_coords", "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "circuit_type", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "prover_input", - "ordinal": 3, "type_info": "Bytea" - }, + } + ], + "nullable": [ + true, + true + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n AND scheduler_witness_jobs.status = 'successful'\n " + }, + "6c81c5a55d595d0790ac20ca202ff3083b0677c47872f2eb1c65e568dd7c156a": { + "describe": { + "columns": [ { - "name": "status", - "ordinal": 4, - "type_info": "Text" + "name": "miniblock_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "error", - "ordinal": 5, - "type_info": "Text" + "name": "log_index_in_miniblock", + "ordinal": 1, + "type_info": "Int4" }, { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" + "name": "log_index_in_tx", + "ordinal": 2, + "type_info": "Int4" }, { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" + "name": "tx_hash", + "ordinal": 3, + "type_info": "Bytea" }, 
{ - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" + "name": "block_hash", + "ordinal": 4, + "type_info": "Bytea" }, { - "name": "time_taken", - "ordinal": 9, - "type_info": "Time" + "name": "l1_batch_number?", + "ordinal": 5, + "type_info": "Int8" }, { - "name": "aggregation_round", - "ordinal": 10, + "name": "shard_id", + "ordinal": 6, "type_info": "Int4" }, { - "name": "result", - "ordinal": 11, - "type_info": "Bytea" + "name": "is_service", + "ordinal": 7, + "type_info": "Bool" }, { - "name": "sequence_number", - "ordinal": 12, + "name": "tx_index_in_miniblock", + "ordinal": 8, "type_info": "Int4" }, { - "name": "attempts", - "ordinal": 13, + "name": "tx_index_in_l1_batch", + "ordinal": 9, "type_info": "Int4" }, { - "name": "circuit_input_blob_url", - "ordinal": 14, - "type_info": "Text" + "name": "sender", + "ordinal": 10, + "type_info": "Bytea" }, { - "name": "proccesed_by", - "ordinal": 15, - "type_info": "Text" + "name": "key", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": "is_blob_cleaned", - "ordinal": 16, - "type_info": "Bool" + "name": "value", + "ordinal": 12, + "type_info": "Bytea" } ], "nullable": [ @@ -5195,76 +4997,41 @@ false, false, false, - false, - true, - true, + null, + null, false, false, false, false, - true, false, false, - true, - true, false ], "parameters": { "Left": [ - "Interval", - "Int4" + "Bytea" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " - }, - 
"766119f845a7a11b6a5bb2a29bab32e2890df772b13e1a378222e089736fd3bf": { - "describe": { - "columns": [ - { - "name": "number!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COALESCE(max(number), 0) as \"number!\" FROM l1_batches\n WHERE eth_prove_tx_id IS NOT NULL" - }, - "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " + "query": "\n SELECT\n miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value\n FROM l2_to_l1_logs\n WHERE tx_hash = $1\n ORDER BY log_index_in_tx ASC\n " }, - "79eddef996b6770822a92f06f0f1a61f9fdcb4f7b57a69cbeae23925bcd10b15": { + "6d923b755e1762ebc499cf2c6d7e894357e7b55f3342be08071e2be183ad2a00": { "describe": { "columns": [ { "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "merkel_tree_paths_blob_url", + "ordinal": 1, + "type_info": "Text" } ], "nullable": [ - false + false, + true ], "parameters": { "Left": [ @@ -5272,13 +5039,13 @@ ] } }, - "query": "\n SELECT l1_batch_number FROM witness_inputs\n WHERE length(merkle_tree_paths) <> 0\n LIMIT $1;\n " + "query": "\n SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND merkel_tree_paths_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "7b90e1c16196f0ee29d7278689fe0ac0169093a11b95edf97c729370fadcb73e": { + "6de96eb86301418de9a4342cd66447afd6eb42759d36e164e36adddbd42e98e2": { "describe": { "columns": 
[ { - "name": "l1_batch_number", + "name": "number", "ordinal": 0, "type_info": "Int8" } @@ -5287,295 +5054,228 @@ false ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number FROM initial_writes\n WHERE hashed_key = $1\n " + "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE execute_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" }, - "7bd5f83afce3c30c9c10d1d94cf6b943c5ba5caaef9fc9130b5c444af4238e14": { + "6ebe0d6a315050d72ffead2dd695f0ba1926a3f4a1ed56b3f291d0f41b72c4d4": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "hashed_key!", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "merkel_tree_paths_blob_url", + "name": "value?", "ordinal": 1, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ - false, - true + null, + null ], "parameters": { "Left": [ + "ByteaArray", "Int8" ] } }, - "query": "\n SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND merkel_tree_paths_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '2 days'\n LIMIT $1;\n " - }, - "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Timestamp" - ] - } - }, - "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" - }, - "7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Jsonb" - ] - } - }, - "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " + "query": "\n SELECT u.hashed_key as \"hashed_key!\",\n 
(SELECT value FROM storage_logs\n WHERE hashed_key = u.hashed_key AND miniblock_number < $2\n ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\"\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n " }, - "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { + "6f9edffc50202b888d12f80e57a2a346d865e522aa5a02fe3fcfa155406227a4": { "describe": { "columns": [ { - "name": "number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "is_finished", + "name": "full_fee", "ordinal": 2, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": "Varchar" }, { - 
"name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": "updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", "ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + "name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", "ordinal": 29, - 
"type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "miniblock_number", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "zkporter_is_available", + "name": "refunded_gas", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, "type_info": "Numeric" }, { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, + "name": "l1_tx_refund_recipient", + "ordinal": 34, "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ false, false, + true, + true, false, + true, + true, + true, false, false, - false, - false, - false, - true, true, true, true, @@ -5583,13 +5283,13 @@ true, true, true, - false, - false, true, false, false, false, + true, false, + true, false, false, false, @@ -5598,389 +5298,297 @@ true, true, true, - true, - true, - true, - true, false, - false, - true, true, - true, - false, - false, - false + true ], "parameters": { "Left": [ - "Int8" + "Int8", + "Numeric", + "Numeric" ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" - }, - 
"7f1a7b5cc5786e1554cb082c2f4cd1368c511e67aeb12465e16661ba940e9538": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "LOCK TABLE prover_jobs IN EXCLUSIVE MODE" + "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" }, - "80ce94067a7727aca34a30372e9770d48b740798121b1abf7b84a6fd3545fe91": { + "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ "ByteaArray", - "Int4Array", - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "Int4Array", - "Int4Array", - "VarcharArray", - "NumericArray", - "JsonbArray", - "ByteaArray", - "JsonbArray", - "Int8Array", "Int8" ] } }, - "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $17,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n 
UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " + "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " }, - "831e1beb42dab1dc4e9b585bb35ce568196e7f46cb655357fdf5437ece519270": { + "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "basic_circuits_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "basic_circuits_inputs_blob_url", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + true + ], "parameters": { "Left": [ "Int8" ] } }, - "query": "\n UPDATE miniblocks\n SET l1_batch_number = $1\n WHERE l1_batch_number IS NULL\n " + "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "87d0666797929df7eda848701b857af200eaada464c3f02b48f106c61d351239": { + "7229ddaadb494c5723946a1e917840eb6035b7d0923518aac7ba2fb81c711d7b": { "describe": { 
"columns": [], "nullable": [], "parameters": { "Left": [ "Int8", - "Int8" - ] - } - }, - "query": "INSERT INTO protective_reads (l1_batch_number, address, key, created_at, updated_at)\n SELECT storage_logs_dedup.l1_batch_number, storage_logs_dedup.address, storage_logs_dedup.key, now(), now()\n FROM storage_logs_dedup\n WHERE l1_batch_number BETWEEN $1 AND $2\n AND is_write = FALSE\n ON CONFLICT DO NOTHING\n " - }, - "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM l1_batches WHERE number > $1" - }, - "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ + "Int8", + "Bytea", + "Int4", "Int4", + "Numeric", "Int8", - "Int8" + "Int8", + "Int8", + "Bytea", + "Bytea" ] } }, - "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "\n INSERT INTO miniblocks (\n number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())\n " }, - "8b96fbf5b8adabd76ea2648688c38c4d9917b3736ca53ed3896c35c0da427369": { + "734fc9cc1ffe10a6c6b56150c0681b6b2757d14b2ea04a289abb1de64dffb172": { "describe": { "columns": [ { - "name": "bytecode_hash", + "name": "hash", "ordinal": 0, "type_info": "Bytea" }, { - "name": "bytecode", + "name": "is_priority", "ordinal": 1, + "type_info": "Bool" + }, + { + "name": "full_fee", + "ordinal": 2, + "type_info": "Numeric" + }, + { + "name": "layer_2_tip_fee", + "ordinal": 3, + "type_info": "Numeric" + }, + { + "name": "initiator_address", + "ordinal": 4, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - 
"Int8" - ] - } - }, - "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number\n WHERE miniblocks.l1_batch_number = $1" - }, - "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { - "describe": { - "columns": [ - { - "name": "l1_address", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_address FROM tokens WHERE well_known = false" - }, - "8fefa3194f469b0f46dc5efcb9e6ccc08159ef6a5681090cb7596877b597bc73": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": 
"Varchar" }, { - "name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": "updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", "ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + "name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", 
"ordinal": 29, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "miniblock_number", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "zkporter_is_available", + "name": "refunded_gas", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "default_aa_code_hash", + "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "block_hash?", "ordinal": 35, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "eth_commit_tx_hash?", "ordinal": 36, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "aux_data_hash", + "name": "eth_prove_tx_hash?", "ordinal": 37, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "pass_through_data_hash", + "name": "eth_execute_tx_hash?", "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "type_info": "Text" } ], "nullable": [ false, false, + true, + true, false, + true, + true, + true, false, false, - false, - false, - false, - true, true, true, true, @@ -5988,13 +5596,13 @@ true, true, true, - false, - false, true, false, false, false, + true, false, + true, false, false, false, @@ -6003,108 +5611,109 @@ true, true, true, - true, - true, - true, - true, false, - false, - true, true, true, false, false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT * FROM 
l1_batches\n ORDER BY number DESC\n LIMIT 1" - }, - "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { - "describe": { - "columns": [ - { - "name": "value", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ + false, false ], "parameters": { "Left": [ - "Bytea", - "Int8" + "Bytea" ] } }, - "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " + "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, - "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { + "75273db544f363b2c75bb7b579ba72fbf9447dd76182159edc40a48b32a9f738": { "describe": { "columns": [ { "name": "id", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "nonce", + "name": "l1_batch_number", "ordinal": 1, "type_info": "Int8" }, { - "name": "raw_tx", + "name": "circuit_type", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "contract_address", + "name": "prover_input", "ordinal": 3, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "tx_type", + "name": "status", "ordinal": 4, 
"type_info": "Text" }, { - "name": "gas_used", + "name": "error", "ordinal": 5, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "created_at", + "name": "processing_started_at", "ordinal": 6, "type_info": "Timestamp" }, { - "name": "updated_at", + "name": "created_at", "ordinal": 7, "type_info": "Timestamp" }, { - "name": "has_failed", + "name": "updated_at", "ordinal": 8, - "type_info": "Bool" + "type_info": "Timestamp" }, { - "name": "sent_at_block", + "name": "time_taken", "ordinal": 9, - "type_info": "Int4" + "type_info": "Time" }, { - "name": "confirmed_eth_tx_history_id", + "name": "aggregation_round", "ordinal": 10, "type_info": "Int4" }, { - "name": "predicted_gas_cost", + "name": "result", "ordinal": 11, - "type_info": "Int8" + "type_info": "Bytea" + }, + { + "name": "sequence_number", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "attempts", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 14, + "type_info": "Text" + }, + { + "name": "proccesed_by", + "ordinal": 15, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 16, + "type_info": "Bool" } ], "nullable": [ @@ -6114,292 +5723,327 @@ false, false, true, + true, false, false, false, + false, + true, + false, + false, true, true, false ], "parameters": { "Left": [ - "Bytea", - "Int8", - "Text", - "Text", - "Int8" + "Interval", + "Int4" ] } }, - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND 
attempts < $2)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, - "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { + "766119f845a7a11b6a5bb2a29bab32e2890df772b13e1a378222e089736fd3bf": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "number!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], "parameters": { - "Left": [ - "Int8Array" - ] + "Left": [] } }, - "query": "\n UPDATE prover_jobs\n SET is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " + "query": "SELECT COALESCE(max(number), 0) as \"number!\" FROM l1_batches\n WHERE eth_prove_tx_id IS NOT NULL" }, - "9be2d960a76e3026408c829cb2fda1eca3b4550edaaa75b5a0e552c3163c1867": { + "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Jsonb", - "Int8", - "Numeric", - "Numeric", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" - ] + "Left": [] } }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, now(), now()\n )\n " + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " }, - "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { + 
"7b90e1c16196f0ee29d7278689fe0ac0169093a11b95edf97c729370fadcb73e": { "describe": { "columns": [ { - "name": "successful_limit!", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "queued_limit!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "max_block!", - "ordinal": 2, - "type_info": "Int8" } ], "nullable": [ - null, - null, - null + false ], "parameters": { - "Left": [] + "Left": [ + "Bytea" + ] } }, - "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " + "query": "\n SELECT l1_batch_number FROM initial_writes\n WHERE hashed_key = $1\n " }, - "9d2faf0b6f8582f0a2607ddd6e216cccfbea7ff5e99646e3a35420c4d190c5f8": { + "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Bytea", + "Numeric", + "Timestamp" ] } }, - "query": "\n UPDATE witness_inputs\n SET merkle_tree_paths=''\n WHERE l1_batch_number = ANY($1);\n " + "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" }, - "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { + "7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Jsonb" + ] + } + }, + "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " + }, + "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { "describe": { "columns": [ { - "name": "address", + 
"name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic1", + "name": "timestamp", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "topic2", + "name": "is_finished", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "topic3", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic4", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "value", + "name": "fee_account_address", "ordinal": 5, "type_info": "Bytea" }, { - "name": "block_hash", + "name": "bloom", "ordinal": 6, "type_info": "Bytea" }, { - "name": "l1_batch_number?", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Int8" + "type_info": "ByteaArray" }, { - "name": "miniblock_number", + "name": "hash", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "tx_hash", + "name": "parent_hash", "ordinal": 9, "type_info": "Bytea" }, { - "name": "tx_index_in_block", + "name": "commitment", "ordinal": 10, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "event_index_in_block", + "name": "compressed_write_logs", "ordinal": 11, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "event_index_in_tx", + "name": "compressed_contracts", "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, "type_info": "Int4" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - null, - null, - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " - }, - 
"a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Int8" - ] - } - }, - "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" - }, - "a3d6cbf1f4386b65338db27467087eb77479f739dc9e9e2ac004c5c0350aa99e": { - "describe": { - "columns": [ + }, { - "name": "number", - "ordinal": 0, + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, "type_info": "Int8" }, { - "name": "hash", - "ordinal": 1, + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" + }, + { + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" + }, + { + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "\n SELECT number, hash FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n LIMIT $2\n " - }, - "a482c481a9ffaad4735775282cf6e8d68f284884e7c6f043e9737a0d236f2e97": { - "describe": { - "columns": [ + }, { - "name": "tx_hash", - "ordinal": 0, + "name": "compressed_repeated_writes", + "ordinal": 27, "type_info": "Bytea" 
}, { - "name": "topic2!", - "ordinal": 1, + "name": "l2_l1_compressed_messages", + "ordinal": 28, "type_info": "Bytea" }, { - "name": "topic3!", - "ordinal": 2, + "name": "l2_l1_merkle_root", + "ordinal": 29, "type_info": "Bytea" }, { - "name": "value!", - "ordinal": 3, + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, + "type_info": "Int4" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 33, "type_info": "Bytea" }, { - "name": "l1_address!", - "ordinal": 4, + "name": "default_aa_code_hash", + "ordinal": 34, "type_info": "Bytea" }, { - "name": "l2_address!", - "ordinal": 5, + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 36, + "type_info": "Int8" + }, + { + "name": "aux_data_hash", + "ordinal": 37, "type_info": "Bytea" }, { - "name": "symbol!", - "ordinal": 6, - "type_info": "Varchar" + "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" }, { - "name": "name!", - "ordinal": 7, - "type_info": "Varchar" + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" }, { - "name": "decimals!", - "ordinal": 8, - "type_info": "Int4" + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" }, { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" } ], "nullable": [ @@ -6411,20 +6055,85 @@ false, false, false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + false, + false, + false, + false, false, - true + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + false, + false, + false ], "parameters": { 
"Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Int8" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" + }, + "7f1a7b5cc5786e1554cb082c2f4cd1368c511e67aeb12465e16661ba940e9538": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "LOCK TABLE prover_jobs IN EXCLUSIVE MODE" + }, + "831e1beb42dab1dc4e9b585bb35ce568196e7f46cb655357fdf5437ece519270": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n UPDATE miniblocks\n SET l1_batch_number = $1\n WHERE l1_batch_number IS NULL\n " + }, + "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM l1_batches WHERE number > $1" }, - "a515ac602a38f43bb9cc026a8bbfc5bea47b13326b0a32b13e9d43724bf4165e": { + "88c49ebeb45f7208d223de59ec08a332beac765644e4f29ed855808b8f9cef91": { "describe": { "columns": [ { @@ -6448,600 +6157,507 @@ ] } }, - "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '2 days'\n LIMIT $1;\n " + "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND 
is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "a7d575d90f9bf19427ddbe342d296effb7c38bc90f213aa1cc94523930dd8f15": { + "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { "describe": { - "columns": [ - { - "name": "tx_hash", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "l1_sender!", - "ordinal": 1, + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + }, + "8a5adf70b154ced83daf6bd085203762380afab2363fa65ff5b7f9df22f48616": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "topic2!", + "name": "is_priority", + "ordinal": 1, + "type_info": "Bool" + }, + { + "name": "full_fee", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "value!", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_address!", + "name": "initiator_address", "ordinal": 4, "type_info": "Bytea" }, { - "name": "l2_address!", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "symbol!", + "name": "signature", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Bytea" }, { - "name": "name!", + "name": "input", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Bytea" }, { - "name": "decimals!", + "name": "data", "ordinal": 8, - "type_info": "Int4" + "type_info": "Jsonb" }, { - "name": "usd_price?", + "name": "received_at", "ordinal": 9, - "type_info": "Numeric" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - true - ], - "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", 
events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " - }, - "a7f4d8a9520de951c50fd12fafc0ce8895e03932cbb0337ce0ea4e884296ca36": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Text", - "Int4", - "Int4" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n " - }, - "a8d2b80d197d8168a6c1b4666e799a9d6c2e31d84986ae352715e687989f913c": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "contract_address", - "ordinal": 1, - "type_info": "Bytea" + "name": "priority_op_id", + "ordinal": 10, + "type_info": "Int8" }, { - "name": "source_code", - "ordinal": 2, - "type_info": "Text" + "name": "l1_batch_number", + "ordinal": 11, + "type_info": "Int8" }, { - "name": "contract_name", - "ordinal": 3, - "type_info": "Text" + "name": "index_in_block", + "ordinal": 12, + "type_info": "Int4" }, { - "name": "compiler_zksolc_version", - "ordinal": 4, - "type_info": "Text" + "name": "error", + "ordinal": 13, + "type_info": "Varchar" }, { - "name": "optimization_used", - "ordinal": 5, - "type_info": "Bool" + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" }, { - "name": "constructor_arguments", - "ordinal": 6, - "type_info": "Bytea" + "name": "gas_per_storage_limit", + "ordinal": 15, + 
"type_info": "Numeric" }, { - "name": "status", - "ordinal": 7, - "type_info": "Text" + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" }, { - "name": "error", - "ordinal": 8, - "type_info": "Text" + "name": "tx_format", + "ordinal": 17, + "type_info": "Int4" }, { "name": "created_at", - "ordinal": 9, + "ordinal": 18, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 10, + "ordinal": 19, "type_info": "Timestamp" }, { - "name": "compilation_errors", - "ordinal": 11, + "name": "execution_info", + "ordinal": 20, "type_info": "Jsonb" }, { - "name": "processing_started_at", - "ordinal": 12, - "type_info": "Timestamp" + "name": "contract_address", + "ordinal": 21, + "type_info": "Bytea" }, { - "name": "compiler_solc_version", - "ordinal": 13, - "type_info": "Text" + "name": "in_mempool", + "ordinal": 22, + "type_info": "Bool" }, { - "name": "attempts", - "ordinal": 14, + "name": "l1_block_number", + "ordinal": 23, "type_info": "Int4" }, { - "name": "panic_message", - "ordinal": 15, - "type_info": "Text" + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, + "type_info": "Bytea" + }, + { + "name": "paymaster_input", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" } ], "nullable": [ false, false, + true, + true, + false, + true, + true, + true, false, 
false, + true, + true, + true, + true, + true, + true, + true, + true, false, false, false, + true, false, true, false, false, + false, + true, + true, + true, true, true, false, - false, + true, true ], "parameters": { "Left": [ - "Interval" + "Int8" ] } }, - "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING contract_verification_requests.*" + "query": "\n SELECT * FROM transactions\n WHERE miniblock_number = $1\n ORDER BY index_in_block\n " }, - "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { + "8b96fbf5b8adabd76ea2648688c38c4d9917b3736ca53ed3896c35c0da427369": { "describe": { "columns": [ { - "name": "count!", + "name": "bytecode_hash", "ordinal": 0, - "type_info": "Int8" - } + "type_info": "Bytea" + }, + { + "name": "bytecode", + "ordinal": 1, + "type_info": "Bytea" + } ], "nullable": [ - null + false, + false ], "parameters": { "Left": [ - "Bytea", - "Numeric", - "Interval", - "Interval" + "Int8" ] } }, - "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " + "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number\n WHERE miniblocks.l1_batch_number = $1" }, - "aa062e23ada48ce48c1f4005ca059abcad411601e038b19154eedd15a2f7a493": { + "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "l1_address", "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "scheduler_witness_blob_url", - 
"ordinal": 1, - "type_info": "Text" - }, - { - "name": "final_node_aggregations_blob_url", - "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ - false, - true, - true + false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND updated_at < NOW() - INTERVAL '2 days'\n AND scheduler_witness_blob_url is NOT NULL\n AND final_node_aggregations_blob_url is NOT NULL\n LIMIT $1;\n " + "query": "SELECT l1_address FROM tokens WHERE well_known = false" }, - "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { + "8fefa3194f469b0f46dc5efcb9e6ccc08159ef6a5681090cb7596877b597bc73": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "nonce", + "name": "timestamp", "ordinal": 1, "type_info": "Int8" }, { - "name": "raw_tx", + "name": "is_finished", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "contract_address", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "tx_type", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "gas_used", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "bloom", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "has_failed", + "name": "hash", "ordinal": 8, - "type_info": "Bool" + "type_info": "Bytea" }, { - "name": "sent_at_block", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "confirmed_eth_tx_history_id", + 
"name": "commitment", "ordinal": 10, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "predicted_gas_cost", + "name": "compressed_write_logs", "ordinal": 11, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - false, - false, - false, - true, - true, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " - }, - "aa9256fd40c557a553b407506794bffcc99247ccb9badf6ab303552d7b1bf5d2": { - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT MIN(count) as \"count\"\n FROM (SELECT COALESCE(SUM(queue_free_slots), 0) as \"count\"\n FROM gpu_prover_queue\n where instance_status = 'available'\n UNION\n SELECT count(*) as \"count\"\n from prover_jobs\n where status = 'queued'\n ) as t1;\n " - }, - "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { - "describe": { - "columns": [ + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, { - "name": "id", - "ordinal": 0, + "name": "eth_prove_tx_id", + "ordinal": 13, "type_info": "Int4" }, { - "name": "eth_tx_id", - "ordinal": 1, + "name": "eth_commit_tx_id", + "ordinal": 14, "type_info": "Int4" }, { - "name": "tx_hash", - "ordinal": 2, - "type_info": "Text" + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" }, { "name": "created_at", - "ordinal": 3, + "ordinal": 16, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 4, + "ordinal": 17, "type_info": "Timestamp" }, { - "name": "base_fee_per_gas", - "ordinal": 5, + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + 
"ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, "type_info": "Int8" }, { - "name": "priority_fee_per_gas", - "ordinal": 6, + "name": "predicted_prove_gas_cost", + "ordinal": 22, "type_info": "Int8" }, { - "name": "confirmed_at", - "ordinal": 7, - "type_info": "Timestamp" + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" }, { - "name": "signed_raw_tx", - "ordinal": 8, - "type_info": "Bytea" + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" }, { - "name": "sent_at_block", - "ordinal": 9, - "type_info": "Int4" + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" }, { - "name": "sent_at", - "ordinal": 10, - "type_info": "Timestamp" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" - }, - "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " - }, - "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { - "describe": { - "columns": [ + "name": "compressed_initial_writes", + "ordinal": 26, + "type_info": "Bytea" + }, { - "name": "market_volume", - "ordinal": 0, - "type_info": "Numeric" + "name": "compressed_repeated_writes", + "ordinal": 27, + "type_info": "Bytea" }, { - "name": "market_volume_updated_at", - "ordinal": 1, - "type_info": "Timestamp" - } - ], - "nullable": [ - true, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = 
$1" - }, - "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" - }, - "aeece159730c2751bc57880c0c394a4ebb60d263ecb4b7f6e68dce681aa23b65": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET prover_input=''\n WHERE id = ANY($1);\n " - }, - "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { - "describe": { - "columns": [ + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" + }, { - "name": "l1_block_number", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ - true - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" - }, - "afc0448c58b0e2f7a7865cc1b5069d66f4cb9d4f609a0fab06cac3b7784910d1": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Int4", - "Int4" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'available', updated_at = now(), queue_free_slots = $3\n WHERE instance_host = $1::text::inet\n AND instance_port = $2\n AND instance_status = 'full'\n " - }, - "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { - "describe": { - "columns": [ + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, { - "name": "id", - "ordinal": 0, + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, "type_info": "Int4" }, { - "name": "eth_tx_id", - "ordinal": 1, - "type_info": "Int4" + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" }, { - "name": "tx_hash", - "ordinal": 2, - "type_info": "Text" + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" }, { - "name": 
"created_at", - "ordinal": 3, - "type_info": "Timestamp" + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" }, { - "name": "updated_at", - "ordinal": 4, - "type_info": "Timestamp" + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" }, { "name": "base_fee_per_gas", - "ordinal": 5, - "type_info": "Int8" + "ordinal": 35, + "type_info": "Numeric" }, { - "name": "priority_fee_per_gas", - "ordinal": 6, + "name": "gas_per_pubdata_limit", + "ordinal": 36, "type_info": "Int8" }, { - "name": "confirmed_at", - "ordinal": 7, - "type_info": "Timestamp" + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" }, { - "name": "signed_raw_tx", - "ordinal": 8, + "name": "pass_through_data_hash", + "ordinal": 38, "type_info": "Bytea" }, { - "name": "sent_at_block", - "ordinal": 9, - "type_info": "Int4" + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" }, { - "name": "sent_at", - "ordinal": 10, - "type_info": "Timestamp" + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" } ], "nullable": [ @@ -7052,140 +6668,157 @@ false, false, false, + false, true, true, true, - true + true, + true, + true, + true, + true, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + false, + false, + false ], "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" - }, - "b21656ac9a476ea4c5ddaeae8d557ad284514e65321088d6c45cab2ffea42825": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Text", - "Text" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET 
number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2\n " - }, - "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Bytea", - "Int4", - "Text" - ] + "Left": [] } }, - "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " + "query": "SELECT * FROM l1_batches\n ORDER BY number DESC\n LIMIT 1" }, - "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { + "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { "describe": { "columns": [ { - "name": "l1_address", + "name": "value", "ordinal": 0, "type_info": "Bytea" - }, - { - "name": "l2_address", - "ordinal": 1, - "type_info": "Bytea" } ], "nullable": [ - false, false ], "parameters": { - "Left": [] + "Left": [ + "Bytea", + "Int8" + ] } }, - "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" + "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " }, - "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { + "9457eab74b924d7d7fd5ecf91886bbfe31844d2158f061cac5aef2ebf8714850": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "nonce", + "name": "l1_batch_number!", "ordinal": 1, "type_info": "Int8" }, { - "name": "raw_tx", + "name": "timestamp", "ordinal": 2, - "type_info": "Bytea" + 
"type_info": "Int8" }, { - "name": "contract_address", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "tx_type", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "gas_used", + "name": "root_hash?", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "commit_tx_hash?", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "updated_at", + "name": "committed_at?", "ordinal": 7, "type_info": "Timestamp" }, { - "name": "has_failed", + "name": "prove_tx_hash?", "ordinal": 8, - "type_info": "Bool" + "type_info": "Text" }, { - "name": "sent_at_block", + "name": "proven_at?", "ordinal": 9, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "confirmed_eth_tx_history_id", + "name": "execute_tx_hash?", "ordinal": 10, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "predicted_gas_cost", + "name": "executed_at?", "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 13, "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 15, + "type_info": "Bytea" } ], "nullable": [ + false, + null, false, false, false, @@ -7193,29 +6826,13 @@ false, true, false, - false, + true, false, true, + false, + false, true, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" - }, - "b776d4d774b300958ba027a7dd80bf19e11d7ef202e8c73dca185d553199fb5f": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false + true ], 
"parameters": { "Left": [ @@ -7223,107 +6840,70 @@ ] } }, - "query": "\n SELECT l1_batch_number FROM scheduler_witness_jobs\n WHERE length(final_node_aggregations) <> 0\n LIMIT $1;\n " - }, - "bd4898ee283a312cb995853686a1f5252e73b22efea3cf9f158c4476c9639b32": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray" - ] - } - }, - "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at)\n SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now()\n FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[])\n AS u(hashed_key, address, key, value, tx_hash)\n ON CONFLICT (hashed_key)\n DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()\n " - }, - "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode_hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash\n FROM miniblocks\n LEFT JOIN l1_batches 
ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " }, - "bf9ad4da63cb23b0991f16381a13139bd32003a7f8d0736deb2b127162e492ec": { + "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { "describe": { "columns": [ { - "name": "number", + "name": "id", "ordinal": 0, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "timestamp", + "name": "nonce", "ordinal": 1, "type_info": "Int8" }, { - "name": "l1_tx_count", + "name": "raw_tx", "ordinal": 2, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "contract_address", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "root_hash?", + "name": "tx_type", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "commit_tx_hash?", + "name": "gas_used", "ordinal": 5, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "committed_at?", + "name": "created_at", "ordinal": 6, "type_info": "Timestamp" }, { - "name": "prove_tx_hash?", + "name": "updated_at", "ordinal": 7, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "proven_at?", + "name": "has_failed", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Bool" }, { - "name": "execute_tx_hash?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "executed_at?", + "name": "confirmed_eth_tx_history_id", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Int4" + }, + { + "name": "predicted_gas_cost", + "ordinal": 11, + "type_info": "Int8" } ], "nullable": [ @@ -7332,315 +6912,200 
@@ false, false, false, - false, true, false, - true, false, - true + false, + true, + true, + false ], "parameters": { "Left": [ + "Bytea", + "Int8", + "Text", + "Text", "Int8" ] } }, - "query": "\n SELECT miniblocks.number,\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" }, - "c0532f9e7a6130426acb032f391f6dae7ff22914f0045673c42c1ee84ca36490": { + "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Bytea", - "Bytea" + "Int8Array" ] } }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) sl\n WHERE sl.value != $2\n " + "query": "\n UPDATE prover_jobs\n SET 
is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " }, - "c1ed4c80984db514dd264a9bc19bdaee29b6f5c291a9d503d9896c41b316cca5": { + "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { "describe": { "columns": [ { - "name": "nonce!", + "name": "successful_limit!", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "queued_limit!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "max_block!", + "ordinal": 2, + "type_info": "Int8" } ], "nullable": [ - true + null, + null, + null ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " + }, + "9d2faf0b6f8582f0a2607ddd6e216cccfbea7ff5e99646e3a35420c4d190c5f8": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Bytea", - "Int8" + "Int8Array" ] } }, - "query": "\n SELECT nonce as \"nonce!\" FROM transactions\n WHERE initiator_address = $1 AND nonce >= $2\n AND is_priority = FALSE\n AND (miniblock_number IS NOT NULL OR error IS NULL)\n ORDER BY nonce\n " + "query": "\n UPDATE witness_inputs\n SET merkle_tree_paths=''\n WHERE l1_batch_number = ANY($1);\n " }, - "c2f6f7fa37b303748f47ff2de01227e7afbc9ff041bc1428743d91300f5f5caf": { + "9e014fe6841b7aab6317b3ee1dc1ab85b2f75ea7836777ef0c70fa1a1023d38f": { "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - true - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Text", + "Int4", + "Int4", + "Int2", + "Text" ] } }, - "query": "\n SELECT l1_batch_number FROM miniblocks\n WHERE number = $1\n " + "query": "\n INSERT INTO gpu_prover_queue 
(instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, now(), now())\n ON CONFLICT(instance_host, instance_port, region)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, updated_at=now()" }, - "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { + "9e994205fe5886f0f8f729110599f3c344562e560fd492e071c9c5bbe50812cf": { "describe": { "columns": [ { "name": "id", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "eth_tx_id", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "circuit_type", "ordinal": 2, "type_info": "Text" }, { - "name": "base_fee_per_gas", + "name": "prover_input", "ordinal": 3, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "priority_fee_per_gas", + "name": "status", "ordinal": 4, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "signed_raw_tx", + "name": "error", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "nonce", + "name": "processing_started_at", "ordinal": 6, - "type_info": "Int8" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" - }, - "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { - "describe": { - "columns": [ - { - 
"name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" - }, - "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - } - }, - "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " - }, - "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4" - ] - } - }, - "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" - }, - "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { - "describe": { - "columns": [ - { - "name": "usd_price", - "ordinal": 0, - "type_info": "Numeric" - }, - { - "name": "usd_price_updated_at", - "ordinal": 1, "type_info": "Timestamp" - } - ], - "nullable": [ - true, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" - }, - "c8c154ac76bb14498fb0f0720fcdab4d863985a8d15c9d8b8b1f68f390bc1c03": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", 
- "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "leaf_layer_subqueues", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "aggregation_outputs", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "number_of_leaf_circuits", - "ordinal": 3, - "type_info": "Int4" }, { - "name": "status", - "ordinal": 4, - "type_info": "Text" + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" }, { - "name": "processing_started_at", - "ordinal": 5, + "name": "updated_at", + "ordinal": 8, "type_info": "Timestamp" }, { "name": "time_taken", - "ordinal": 6, + "ordinal": 9, "type_info": "Time" }, { - "name": "error", - "ordinal": 7, - "type_info": "Text" + "name": "aggregation_round", + "ordinal": 10, + "type_info": "Int4" }, { - "name": "created_at", - "ordinal": 8, - "type_info": "Timestamp" + "name": "result", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": "updated_at", - "ordinal": 9, - "type_info": "Timestamp" + "name": "sequence_number", + "ordinal": 12, + "type_info": "Int4" }, { "name": "attempts", - "ordinal": 10, + "ordinal": 13, "type_info": "Int4" }, { - "name": "leaf_layer_subqueues_blob_url", - "ordinal": 11, + "name": "circuit_input_blob_url", + "ordinal": 14, "type_info": "Text" }, { - "name": "aggregation_outputs_blob_url", - "ordinal": 12, + "name": "proccesed_by", + "ordinal": 15, "type_info": "Text" }, { "name": "is_blob_cleaned", - "ordinal": 13, + "ordinal": 16, "type_info": "Bool" } ], "nullable": [ false, - true, - true, - true, + false, + false, + false, false, true, true, - true, false, false, false, + false, + true, + false, + false, true, true, false @@ -7648,54 +7113,80 @@ "parameters": { "Left": [ "Interval", - "Int4" + "Int4", + "TextArray" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM node_aggregation_witness_jobs\n WHERE status = 
'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs.*\n " + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($3)\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, - "cbd6ed03ec615ee3a1747bc39f068e792d2c51ef4e3717b3f0074a38a625a44b": { + "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { "columns": [ { - "name": "number", + "name": "address", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "topic1", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "hash", + "name": "topic2", "ordinal": 2, "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "topic3", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "topic4", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "value", "ordinal": 5, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "l1_gas_price", + "name": "block_hash", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l2_fair_gas_price", + "name": "l1_batch_number?", "ordinal": 7, "type_info": "Int8" + }, + { + "name": "miniblock_number", + "ordinal": 8, + "type_info": "Int8" + }, + { + "name": "tx_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + 
{ + "name": "tx_index_in_block", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "event_index_in_block", + "ordinal": 11, + "type_info": "Int4" + }, + { + "name": "event_index_in_tx", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ @@ -7705,39 +7196,37 @@ false, false, false, + null, + null, + false, + false, + false, false, false ], "parameters": { "Left": [ - "Int8" + "Bytea" ] } }, - "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price\n FROM miniblocks\n WHERE number = $1\n " + "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "cbe9445b28efc540d4a01b4c8f1e62017e9854b2d01973c55b27603a8a81bbdd": { + "a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { "describe": { - "columns": [ - { - "name": "value", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Bytea", + "Int4", + "Int8", "Int8" ] } }, - "query": "select value from storage_logs where hashed_key = $1 and miniblock_number <= $2 order by miniblock_number desc, operation_number desc limit 1" + "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, - "ce12a389d218de2071752e8f67b9ad3132777c8a8737009be283e1bedef6dad5": { + "a3d6cbf1f4386b65338db27467087eb77479f739dc9e9e2ac004c5c0350aa99e": { "describe": { "columns": [ { @@ -7746,214 +7235,309 @@ "type_info": "Int8" }, { - "name": "timestamp", + "name": "hash", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": 
"\n SELECT number, hash FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n LIMIT $2\n " + }, + "a482c481a9ffaad4735775282cf6e8d68f284884e7c6f043e9737a0d236f2e97": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Bytea" }, { - "name": "is_finished", + "name": "topic2!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "topic3!", "ordinal": 2, - "type_info": "Bool" + "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "value!", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "l1_address!", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "l2_address!", "ordinal": 5, "type_info": "Bytea" }, { - "name": "bloom", + "name": "symbol!", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Varchar" }, { - "name": "priority_ops_onchain_data", + "name": "name!", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Varchar" }, { - "name": "hash", + "name": "decimals!", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "parent_hash", + "name": "usd_price?", "ordinal": 9, + "type_info": "Numeric" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + }, + 
"a4eef598864b0d59bd663eb16bff3a23bcb7ac37bb6a2e702d6415b8dd99cd9f": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int8", + "Bool", + "Bytea", + "ByteaArray", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Numeric", + "Int8", + "Int8", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count,\n timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data,\n predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost,\n initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now())\n " + }, + "a7d575d90f9bf19427ddbe342d296effb7c38bc90f213aa1cc94523930dd8f15": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "commitment", - "ordinal": 10, + "name": "l1_sender!", + "ordinal": 1, "type_info": "Bytea" }, { - "name": "compressed_write_logs", - "ordinal": 11, + "name": "topic2!", + "ordinal": 2, "type_info": "Bytea" }, { - "name": "compressed_contracts", - "ordinal": 12, + "name": "value!", + "ordinal": 3, "type_info": "Bytea" }, { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" + "name": "l1_address!", + "ordinal": 4, + "type_info": "Bytea" }, { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" + "name": "l2_address!", + "ordinal": 5, + "type_info": "Bytea" }, { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" + "name": "symbol!", + "ordinal": 6, + "type_info": "Varchar" }, { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" + "name": "name!", + "ordinal": 7, + "type_info": 
"Varchar" }, { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" + "name": "decimals!", + "ordinal": 8, + "type_info": "Int4" }, { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " + }, + "a7f4d8a9520de951c50fd12fafc0ce8895e03932cbb0337ce0ea4e884296ca36": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Int4", + "Int4" + ] + } + }, + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n " + }, + "a8d2b80d197d8168a6c1b4666e799a9d6c2e31d84986ae352715e687989f913c": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" + "name": "contract_address", + "ordinal": 1, + "type_info": "Bytea" }, { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" + "name": "source_code", + "ordinal": 2, + "type_info": "Text" }, { 
- "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" + "name": "contract_name", + "ordinal": 3, + "type_info": "Text" }, { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" + "name": "compiler_zksolc_version", + "ordinal": 4, + "type_info": "Text" }, { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" + "name": "optimization_used", + "ordinal": 5, + "type_info": "Bool" }, { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, + "name": "constructor_arguments", + "ordinal": 6, "type_info": "Bytea" }, { - "name": "l1_gas_price", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 31, - "type_info": "Int8" + "name": "status", + "ordinal": 7, + "type_info": "Text" }, { - "name": "rollup_last_leaf_index", - "ordinal": 32, - "type_info": "Int8" + "name": "error", + "ordinal": 8, + "type_info": "Text" }, { - "name": "zkporter_is_available", - "ordinal": 33, - "type_info": "Bool" + "name": "created_at", + "ordinal": 9, + "type_info": "Timestamp" }, { - "name": "bootloader_code_hash", - "ordinal": 34, - "type_info": "Bytea" + "name": "updated_at", + "ordinal": 10, + "type_info": "Timestamp" }, { - "name": "default_aa_code_hash", - "ordinal": 35, - "type_info": "Bytea" + "name": "compilation_errors", + "ordinal": 11, + "type_info": "Jsonb" }, { - "name": "base_fee_per_gas", - "ordinal": 36, - "type_info": "Numeric" + "name": "processing_started_at", + "ordinal": 12, + "type_info": "Timestamp" }, { - "name": "aux_data_hash", - 
"ordinal": 37, - "type_info": "Bytea" + "name": "compiler_solc_version", + "ordinal": 13, + "type_info": "Text" }, { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" + "name": "attempts", + "ordinal": 14, + "type_info": "Int4" }, { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" + "name": "panic_message", + "ordinal": 15, + "type_info": "Text" }, { - "name": "skip_proof", - "ordinal": 40, + "name": "is_system", + "ordinal": 16, "type_info": "Bool" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 41, - "type_info": "Int4" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ @@ -7966,124 +7550,24 @@ false, false, true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, false, false, true, true, - true, - true, false, - true, - true, - true, false, true, false ], "parameters": { "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "\n SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit\n FROM\n (SELECT l1_batches.*, row_number() over (order by number ASC) as 
row_number\n FROM l1_batches\n WHERE eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY number LIMIT $2) inn\n WHERE number - row_number = $1\n " - }, - "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - } - }, - "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " - }, - "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM miniblocks WHERE number > $1" - }, - "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Jsonb", - "Text" - ] - } - }, - "query": "\n UPDATE contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " - }, - "d0571a05a9f65e71b3ab478dc7217c3644024ed0d6ae6616c331a7737759c86c": { - "describe": { - "columns": [ - { - "name": "merkle_root_hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT merkle_root_hash FROM l1_batches WHERE number = $1" - }, - "d0770d2d0cc0cec5cf5c2e90912b697f19adbdf5cb6e734c3bddd06ad96e83e9": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "TextArray" + "Interval" ] } }, - "query": "\n INSERT INTO 
contract_verification_solc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " + "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING contract_verification_requests.*" }, - "d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": { + "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { "describe": { "columns": [ { @@ -8096,121 +7580,177 @@ null ], "parameters": { - "Left": [] + "Left": [ + "Bytea", + "Numeric", + "Interval", + "Interval" + ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" + "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " }, - "d2f16dcd8175a337f57724ce5b2fb59d2934f60bb2d24c6ec77195dc63c26002": { + "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { "describe": { "columns": [ { - "name": "hash!", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "l1_address!", + "name": "nonce", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l2_address!", + "name": "raw_tx", "ordinal": 2, "type_info": "Bytea" }, { - "name": "symbol!", + "name": "contract_address", "ordinal": 3, - "type_info": "Varchar" + "type_info": "Text" }, { - "name": "name!", + "name": "tx_type", "ordinal": 4, - "type_info": "Varchar" + "type_info": "Text" }, { - "name": "decimals!", + "name": "gas_used", "ordinal": 5, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "usd_price?", + "name": 
"created_at", "ordinal": 6, - "type_info": "Numeric" - } - ], - "nullable": [ - true, - true, - true, - true, - true, - true, - true - ], - "parameters": { - "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "has_failed", + "ordinal": 8, + "type_info": "Bool" + }, + { + "name": "sent_at_block", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "confirmed_eth_tx_history_id", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "predicted_gas_cost", + "ordinal": 11, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true, + false, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Int8" ] } }, - "query": "\n SELECT hash as \"hash!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM transactions\n INNER JOIN tokens\n ON tokens.l2_address = transactions.contract_address OR (transactions.contract_address = $2 AND tokens.l2_address = $3)\n WHERE hash = ANY($1)\n " + "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " }, - "d6654b10ce779826e565bddf67c9a1aca2767f11e858eb9aaedff4b0ea277a34": { + "aa9256fd40c557a553b407506794bffcc99247ccb9badf6ab303552d7b1bf5d2": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "count", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(count) as \"count\"\n FROM (SELECT COALESCE(SUM(queue_free_slots), 0) as \"count\"\n FROM gpu_prover_queue\n where instance_status = 'available'\n UNION\n SELECT count(*) as \"count\"\n from prover_jobs\n where status = 'queued'\n ) as t1;\n " + }, 
+ "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" }, { - "name": "topic2!", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic3!", + "name": "tx_hash", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value!", + "name": "created_at", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_address!", + "name": "updated_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_address!", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "symbol!", + "name": "priority_fee_per_gas", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "name!", + "name": "confirmed_at", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Timestamp" }, { - "name": "decimals!", + "name": "signed_raw_tx", "ordinal": 8, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "usd_price?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Numeric" + "type_info": "Int4" + }, + { + "name": "sent_at", + "ordinal": 10, + "type_info": "Timestamp" } ], "nullable": [ @@ -8221,47 +7761,49 @@ false, false, false, - false, - false, + true, + true, + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea", - "Bytea" + "Int4" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n tokens.l2_address = events.address OR (events.address = $3 AND tokens.l2_address = $4)\n WHERE tx_hash = ANY($1) AND topic1 = $2\n ORDER BY tx_hash, miniblock_number ASC, 
event_index_in_block ASC\n " + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" }, - "d6709f3ce8f08f988e10a0e0fb5c06db9488834a85066babaf3d56cf212b4ea0": { + "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ "Bytea", - "Varchar", - "Varchar", - "Int4" + "Int8" ] } }, - "query": "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3,\n token_list_decimals = $4, well_known = true, updated_at = now()\n WHERE l1_address = $1\n " + "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " }, - "d8515595d34dca53e50bbd4ed396f6208e33f596195a5ed02fba9e8364ceb33c": { + "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { "describe": { "columns": [ { - "name": "bytecode", + "name": "market_volume", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Numeric" + }, + { + "name": "market_volume_updated_at", + "ordinal": 1, + "type_info": "Timestamp" } ], "nullable": [ - false + true, + true ], "parameters": { "Left": [ @@ -8269,9 +7811,9 @@ ] } }, - "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1" + "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" }, - "dbf9a2be8cdd0a8ad95f049134d33ae0c4ed4204e4d8f6e5f3244bea4830f67e": { + "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { "describe": { "columns": [], "nullable": [], @@ -8281,117 +7823,147 @@ ] } }, - "query": "\n UPDATE l1_batches\n SET skip_proof = TRUE WHERE number = $1\n " + "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" }, - "dbfb1709a68fccf341320f7cf1b757378ec462d63d17672f82a8d9f95797136d": { + "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { "describe": { "columns": [ { - "name": "hash", + "name": "l1_block_number", "ordinal": 0, - "type_info": "Bytea" - }, - { - 
"name": "received_at", - "ordinal": 1, - "type_info": "Timestamp" + "type_info": "Int4" } ], "nullable": [ - false, - false + true ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" + }, + "afc0448c58b0e2f7a7865cc1b5069d66f4cb9d4f609a0fab06cac3b7784910d1": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Timestamp", - "Int8" + "Text", + "Int4", + "Int4" ] } }, - "query": "\n SELECT transactions.hash, transactions.received_at\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = miniblock_number\n WHERE received_at > $1\n ORDER BY received_at ASC\n LIMIT $2\n " + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'available', updated_at = now(), queue_free_slots = $3\n WHERE instance_host = $1::text::inet\n AND instance_port = $2\n AND instance_status = 'full'\n " }, - "dd10ebfbf5db4d2ac44b03be3acf494ea180f59685d8fc156af481e8265079c2": { + "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { "describe": { "columns": [ { - "name": "hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "number", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "timestamp", + "name": "tx_hash", "ordinal": 2, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 4, + "type_info": "Timestamp" + }, + { + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Int8" + }, + { + "name": "priority_fee_per_gas", + "ordinal": 6, "type_info": "Int8" + }, + { + "name": "confirmed_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "signed_raw_tx", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "sent_at_block", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "sent_at", + "ordinal": 10, + 
"type_info": "Timestamp" } ], "nullable": [ false, false, - false + false, + false, + false, + false, + false, + true, + true, + true, + true ], "parameters": { "Left": [ - "Int8" + "Int4" ] } }, - "query": "\n SELECT\n hash,\n number,\n timestamp\n FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n " + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" }, - "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { + "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { "describe": { "columns": [], "nullable": [], - "parameters": { - "Left": [ - "Varchar", - "Bytea" - ] - } - }, - "query": "UPDATE transactions\n SET error = $1, updated_at = now()\n WHERE hash = $2" - }, - "ddb3b38be2b6038b63288961f46ba7d3bb7250caff1146e13c5ee77b6a994ffc": { - "describe": { - "columns": [ - { - "name": "circuit_type", - "ordinal": 0, - "type_info": "Text" - }, - { - "name": "result", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - true - ], "parameters": { "Left": [ "Int8", - "Int4" + "Text", + "Int4", + "Bytea", + "Int4", + "Text" ] } }, - "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " + "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " }, - "dec8533793968c9db379e3da18f262ea9d9dce2f8959c29b0a638296bf10ccc2": { + "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { "describe": { "columns": [ { - "name": "key", + "name": "l1_address", "ordinal": 0, "type_info": "Bytea" }, { - "name": "bytecode", + "name": "l2_address", "ordinal": 1, "type_info": "Bytea" } @@ -8401,48 +7973,73 @@ 
false ], "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT storage_logs.key, factory_deps.bytecode\n FROM storage_logs\n JOIN factory_deps ON storage_logs.value = factory_deps.bytecode_hash\n WHERE\n storage_logs.address = $1 AND\n storage_logs.miniblock_number >= $3 AND\n storage_logs.miniblock_number <= $4 AND\n NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $2\n )\n " + "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" }, - "e14338281eb639856f1c7a8ba6b60fe3914d3f30d0b55cea8fb287209892df03": { + "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { "describe": { "columns": [ { - "name": "key_address", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "bytecode", + "name": "nonce", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "creator_address?", + "name": "raw_tx", "ordinal": 2, "type_info": "Bytea" }, { - "name": "creator_tx_hash?", + "name": "contract_address", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "created_in_block_number", + "name": "tx_type", "ordinal": 4, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "verification_info", + "name": "gas_used", "ordinal": 5, - "type_info": "Jsonb" + "type_info": "Int8" + }, + { + "name": "created_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "has_failed", + "ordinal": 8, + "type_info": "Bool" + }, + { + "name": "sent_at_block", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "confirmed_eth_tx_history_id", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "predicted_gas_cost", + "ordinal": 11, + "type_info": "Int8" } ], 
"nullable": [ @@ -8451,72 +8048,73 @@ false, false, false, - true + true, + false, + false, + false, + true, + true, + false ], "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Bytea" - ] + "Left": [] } }, - "query": "\n WITH sl AS (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n )\n SELECT\n sl.key as \"key_address\",\n fd.bytecode,\n txs.initiator_address as \"creator_address?\",\n txs.hash as \"creator_tx_hash?\",\n sl.miniblock_number as \"created_in_block_number\",\n c.verification_info\n FROM sl\n JOIN factory_deps fd ON fd.bytecode_hash = sl.value\n LEFT JOIN transactions txs ON txs.hash = sl.tx_hash\n LEFT JOIN contracts_verification_info c ON c.address = $2\n WHERE sl.value != $3\n " + "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" }, - "e15e67a56e3caa0a30d4981308437a531f6d16e7a7bb5ebacd9a9466b10f5e7a": { + "bd4898ee283a312cb995853686a1f5252e73b22efea3cf9f158c4476c9639b32": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Text" + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray" ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2, status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1\n " + "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at)\n SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now()\n FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[])\n AS u(hashed_key, address, key, value, tx_hash)\n ON CONFLICT (hashed_key)\n DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()\n " }, - 
"e199251d38cb1f18993863f2e7920f21f7867ae1b48ffc905919de7bd98491de": { + "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { "describe": { "columns": [ { - "name": "min?", + "name": "bytecode", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" + }, + { + "name": "bytecode_hash", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ - null + false, + false ], "parameters": { - "Left": [] + "Left": [ + "ByteaArray" + ] } }, - "query": "\n SELECT MIN(miniblock_number) as \"min?\"\n FROM l2_to_l1_logs\n " + "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" }, - "e2023b335b34b24cd0bd8d1d972aa1867a13c78504312fc718e801272c47b559": { + "c0532f9e7a6130426acb032f391f6dae7ff22914f0045673c42c1ee84ca36490": { "describe": { "columns": [ { - "name": "bytecode", + "name": "count!", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 1, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 2, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false, - false, - true + null ], "parameters": { "Left": [ @@ -8525,106 +8123,184 @@ ] } }, - "query": "\n SELECT factory_deps.bytecode, transactions.data, transactions.contract_address\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) sl\n WHERE sl.value != $2\n " }, - "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { + "c1ed4c80984db514dd264a9bc19bdaee29b6f5c291a9d503d9896c41b316cca5": { "describe": { 
"columns": [ { - "name": "id", + "name": "nonce!", "ordinal": 0, - "type_info": "Int4" - }, - { - "name": "eth_tx_id", - "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ - false, - false + true ], "parameters": { "Left": [ - "Text" + "Bytea", + "Int8" ] } }, - "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" + "query": "\n SELECT nonce as \"nonce!\" FROM transactions\n WHERE initiator_address = $1 AND nonce >= $2\n AND is_priority = FALSE\n AND (miniblock_number IS NOT NULL OR error IS NULL)\n ORDER BY nonce\n " }, - "e42721cc22fbb2bda84f64057586f019cc5122c8e8723f2a9df778b2aa19fffc": { + "c2f6f7fa37b303748f47ff2de01227e7afbc9ff041bc1428743d91300f5f5caf": { "describe": { "columns": [ { - "name": "version", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Text" + "type_info": "Int8" } ], "nullable": [ - false + true ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "SELECT version FROM contract_verification_solc_versions ORDER by version" + "query": "\n SELECT l1_batch_number FROM miniblocks\n WHERE number = $1\n " }, - "e7f7e746aca1c17a8c88aba2db3f7cbd7c639c003580fc72e7b6af4c8ffba595": { + "c4250120d4a7333157bf50058e9dd568d92f8e2060c27d4fd51d337be91a9aa1": { "describe": { "columns": [ { - "name": "bytecode_hash", + "name": "instance_host", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Inet" }, { - "name": "bytecode", + "name": "instance_port", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" + }, + { + "name": "instance_status", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 4, + "type_info": "Timestamp" + }, + { + "name": "processing_started_at", + "ordinal": 5, + "type_info": "Timestamp" + }, + { + "name": "queue_free_slots", + "ordinal": 6, + "type_info": "Int4" + }, + { + "name": "queue_capacity", 
+ "ordinal": 7, + "type_info": "Int4" + }, + { + "name": "specialized_prover_group_id", + "ordinal": 8, + "type_info": "Int2" + }, + { + "name": "region", + "ordinal": 9, + "type_info": "Text" } ], "nullable": [ false, + false, + false, + false, + false, + true, + true, + true, + true, false ], "parameters": { "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n WHERE miniblock_number >= $1 AND miniblock_number <= $2" - }, - "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray" + "Interval", + "Int2", + "Text" ] } }, - "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE (instance_host, instance_port) in (\n SELECT instance_host, instance_port\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " }, - "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { + "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + }, + { + "name": "eth_tx_id", + "ordinal": 1, + "type_info": "Int4" + }, + { + "name": "tx_hash", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "base_fee_per_gas", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "priority_fee_per_gas", + "ordinal": 4, + "type_info": "Int8" + }, + { + "name": "signed_raw_tx", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "nonce", + "ordinal": 6, + 
"type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true, + false + ], "parameters": { "Left": [] } }, - "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" + "query": "\n SELECT \n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM eth_txs_history \n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id \n WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY eth_txs_history.id DESC" }, - "ec4a3bc6a7a9c13ad11a4b71bed019a961f918a1d1376440c484cc42432c6c9c": { + "c6aadc4ec78e30f5775f7a9f866ad02984b78de3e3d1f34c144a4057ff44ea6a": { "describe": { "columns": [ { - "name": "count!", + "name": "count", "ordinal": 0, "type_info": "Int8" } @@ -8633,15 +8309,12 @@ null ], "parameters": { - "Left": [ - "Int8", - "Int4" - ] + "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM prover_jobs\n WHERE status = 'successful' AND l1_batch_number = $1 AND aggregation_round = $2\n " + "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" }, - "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { + "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { "describe": { "columns": [], "nullable": [], @@ -8651,146 +8324,174 @@ ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " }, - "ee7bd820bf35c5c714092494c386eccff25457cff6dc00eb81d9809eaeb95670": { + "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { "describe": { "columns": [ { - "name": "is_replaced!", + "name": "bytecode", "ordinal": 0, - "type_info": "Bool" + "type_info": "Bytea" } ], 
"nullable": [ - null + false ], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Int8", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Numeric", - "Bytea", - "Jsonb", - "Int4", - "Bytea", - "Numeric", - "Bytea", "Bytea", "Int8", - "Int4", - "Int4", - "Timestamp" + "Bytea" ] } }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, FALSE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n $19, now(), now()\n )\n ON CONFLICT\n (initiator_address, nonce)\n DO UPDATE\n SET hash=$1,\n signature=$4,\n gas_limit=$5,\n max_fee_per_gas=$6,\n max_priority_fee_per_gas=$7,\n gas_per_pubdata_limit=$8,\n input=$9,\n data=$10,\n tx_format=$11,\n contract_address=$12,\n value=$13,\n paymaster=$14,\n paymaster_input=$15,\n execution_info=jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n in_mempool=FALSE,\n received_at=$19,\n created_at=now(),\n updated_at=now(),\n error = NULL\n WHERE transactions.is_priority = FALSE AND transactions.miniblock_number IS NULL\n RETURNING (SELECT hash FROM transactions WHERE transactions.initiator_address = $2 AND transactions.nonce = $3) IS NOT NULL as \"is_replaced!\"\n " + "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " }, - 
"eeb83808774404b1af3e09c1f89399c92f743be21e45b7b19a0ece6084e61c6c": { + "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { "describe": { "columns": [ { - "name": "tx_format", + "name": "l1_batch_number", "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "merkle_tree_paths", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "created_at", + "ordinal": 2, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" + }, + { + "name": "processing_started_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "error", + "ordinal": 7, + "type_info": "Varchar" + }, + { + "name": "attempts", + "ordinal": 8, "type_info": "Int4" + }, + { + "name": "merkel_tree_paths_blob_url", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 10, + "type_info": "Bool" } ], "nullable": [ - true + false, + true, + false, + false, + false, + false, + true, + true, + false, + true, + false ], "parameters": { "Left": [ + "Interval", + "Int4", "Int8" ] } }, - "query": "\n UPDATE transactions \n SET tx_format=255 \n WHERE hash IN (\n SELECT hash \n FROM transactions\n WHERE is_priority = true\n AND tx_format is null\n LIMIT $1\n )\n RETURNING tx_format\n " + "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " }, - "efc83e42f5d0238b8996a5b311746527289a5a002ff659531a076680127e8eb4": { + 
"c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - true - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int8", + "Int4", + "Int4" ] } }, - "query": "SELECT hash FROM l1_batches WHERE number = $1" + "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" }, - "f0308ffa4cc34a305150959ad1a30792c0b2bf493c6fa6183725b731a89c11e8": { + "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { "describe": { "columns": [ { - "name": "count", + "name": "usd_price", "ordinal": 0, - "type_info": "Int8" + "type_info": "Numeric" + }, + { + "name": "usd_price_updated_at", + "ordinal": 1, + "type_info": "Timestamp" } ], "nullable": [ - null + true, + true ], "parameters": { "Left": [ - "Bytea", "Bytea" ] } }, - "query": "SELECT count(*)\n FROM storage\n WHERE\n address = $1 AND\n value != $2\n " + "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" }, - "f0c50c53c3883c1ae59263b40e55011760d64350eff411eef856ff301bb70579": { + "cbe9445b28efc540d4a01b4c8f1e62017e9854b2d01973c55b27603a8a81bbdd": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "value", "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 1, - "type_info": "Int4" + "type_info": "Bytea" } ], "nullable": [ - true, - true + false ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT l1_batch_number, l1_batch_tx_index\n FROM transactions\n WHERE hash = $1\n " - }, - "f3f7ceb708cc072d66e8609d64ba99e6faa80bf58ff0ce0ef49e882af63522d4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ + "Bytea", "Int8" ] } }, - "query": "\n INSERT INTO node_aggregation_witness_jobs\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_artifacts', now(), now())\n " + 
"query": "select value from storage_logs where hashed_key = $1 and miniblock_number <= $2 order by miniblock_number desc, operation_number desc limit 1" }, - "f59f291b06c6a0cd0ba4de04f07c05a38a93665cc81f78b14223d6ceef5d6ba6": { + "ce12a389d218de2071752e8f67b9ad3132777c8a8737009be283e1bedef6dad5": { "describe": { "columns": [ { @@ -8944,39 +8645,39 @@ "type_info": "Bytea" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "l1_gas_price", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l2_fair_gas_price", "ordinal": 31, "type_info": "Int8" }, { - "name": "zkporter_is_available", + "name": "rollup_last_leaf_index", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "zkporter_is_available", "ordinal": 33, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "default_aa_code_hash", + "name": "bootloader_code_hash", "ordinal": 34, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "default_aa_code_hash", "ordinal": 35, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "base_fee_per_gas", "ordinal": 36, - "type_info": "Int8" + "type_info": "Numeric" }, { "name": "aux_data_hash", @@ -8999,12 +8700,12 @@ "type_info": "Bool" }, { - "name": "l1_gas_price", + "name": "gas_per_pubdata_byte_in_block", "ordinal": 41, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", + "name": "gas_per_pubdata_limit", "ordinal": 42, "type_info": "Int8" } @@ -9040,184 +8741,1025 @@ true, true, true, - true, + false, + false, true, true, true, true, false, + true, + true, + true, false, + true, + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "\n SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, 
eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit\n FROM\n (SELECT l1_batches.*, row_number() over (order by number ASC) as row_number\n FROM l1_batches\n WHERE eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY number LIMIT $2) inn\n WHERE number - row_number = $1\n " + }, + "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + } + }, + "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " + }, + "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM miniblocks WHERE number > $1" + }, + "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Jsonb", + "Text" + ] + } + }, + "query": "\n UPDATE 
contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " + }, + "d0571a05a9f65e71b3ab478dc7217c3644024ed0d6ae6616c331a7737759c86c": { + "describe": { + "columns": [ + { + "name": "merkle_root_hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT merkle_root_hash FROM l1_batches WHERE number = $1" + }, + "d0770d2d0cc0cec5cf5c2e90912b697f19adbdf5cb6e734c3bddd06ad96e83e9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "TextArray" + ] + } + }, + "query": "\n INSERT INTO contract_verification_solc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " + }, + "d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" + }, + "d2f16dcd8175a337f57724ce5b2fb59d2934f60bb2d24c6ec77195dc63c26002": { + "describe": { + "columns": [ + { + "name": "hash!", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "l1_address!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "l2_address!", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "symbol!", + "ordinal": 3, + "type_info": "Varchar" + }, + { + "name": "name!", + "ordinal": 4, + "type_info": "Varchar" + }, + { + "name": "decimals!", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "usd_price?", + "ordinal": 6, + "type_info": "Numeric" + } + ], + "nullable": [ true, true, true, + true, + true, + true, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT hash as \"hash!\",\n tokens.l1_address as \"l1_address!\", 
tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM transactions\n INNER JOIN tokens\n ON tokens.l2_address = transactions.contract_address OR (transactions.contract_address = $2 AND tokens.l2_address = $3)\n WHERE hash = ANY($1)\n " + }, + "d6654b10ce779826e565bddf67c9a1aca2767f11e858eb9aaedff4b0ea277a34": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "topic2!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "topic3!", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "value!", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "l1_address!", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "l2_address!", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "symbol!", + "ordinal": 6, + "type_info": "Varchar" + }, + { + "name": "name!", + "ordinal": 7, + "type_info": "Varchar" + }, + { + "name": "decimals!", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" + } + ], + "nullable": [ false, false, - false + false, + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n tokens.l2_address = events.address OR (events.address = $3 AND tokens.l2_address = $4)\n WHERE tx_hash = ANY($1) AND topic1 = $2\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + }, + "d6709f3ce8f08f988e10a0e0fb5c06db9488834a85066babaf3d56cf212b4ea0": { + 
"describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Varchar", + "Varchar", + "Int4" + ] + } + }, + "query": "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3,\n token_list_decimals = $4, well_known = true, updated_at = now()\n WHERE l1_address = $1\n " + }, + "d8515595d34dca53e50bbd4ed396f6208e33f596195a5ed02fba9e8364ceb33c": { + "describe": { + "columns": [ + { + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1" + }, + "d9b5fe50f1669cd648badb6d1ffe3dfa4fd263d9e3f946550bc8551815627ba5": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number FROM witness_inputs\n WHERE length(merkle_tree_paths) <> 0\n ORDER BY l1_batch_number DESC\n LIMIT $1;\n " + }, + "dbf9a2be8cdd0a8ad95f049134d33ae0c4ed4204e4d8f6e5f3244bea4830f67e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n UPDATE l1_batches\n SET skip_proof = TRUE WHERE number = $1\n " + }, + "dbfb1709a68fccf341320f7cf1b757378ec462d63d17672f82a8d9f95797136d": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "received_at", + "ordinal": 1, + "type_info": "Timestamp" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Timestamp", + "Int8" + ] + } + }, + "query": "\n SELECT transactions.hash, transactions.received_at\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = miniblock_number\n WHERE received_at > $1\n ORDER BY received_at ASC\n LIMIT $2\n " + }, + "dd10ebfbf5db4d2ac44b03be3acf494ea180f59685d8fc156af481e8265079c2": { + "describe": { + "columns": 
[ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 2, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT\n hash,\n number,\n timestamp\n FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n " + }, + "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Varchar", + "Bytea" + ] + } + }, + "query": "UPDATE transactions\n SET error = $1, updated_at = now()\n WHERE hash = $2" + }, + "ddb3b38be2b6038b63288961f46ba7d3bb7250caff1146e13c5ee77b6a994ffc": { + "describe": { + "columns": [ + { + "name": "circuit_type", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "result", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + true + ], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + } + }, + "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " + }, + "dec8533793968c9db379e3da18f262ea9d9dce2f8959c29b0a638296bf10ccc2": { + "describe": { + "columns": [ + { + "name": "key", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "bytecode", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Int8" + ] + } + }, + "query": "\n SELECT storage_logs.key, factory_deps.bytecode\n FROM storage_logs\n JOIN factory_deps ON storage_logs.value = factory_deps.bytecode_hash\n WHERE\n storage_logs.address = $1 AND\n storage_logs.miniblock_number >= $3 AND\n storage_logs.miniblock_number <= $4 AND\n NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n 
(s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $2\n )\n " + }, + "e14338281eb639856f1c7a8ba6b60fe3914d3f30d0b55cea8fb287209892df03": { + "describe": { + "columns": [ + { + "name": "key_address", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "bytecode", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "creator_address?", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "creator_tx_hash?", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "created_in_block_number", + "ordinal": 4, + "type_info": "Int8" + }, + { + "name": "verification_info", + "ordinal": 5, + "type_info": "Jsonb" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n WITH sl AS (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n )\n SELECT\n sl.key as \"key_address\",\n fd.bytecode,\n txs.initiator_address as \"creator_address?\",\n txs.hash as \"creator_tx_hash?\",\n sl.miniblock_number as \"created_in_block_number\",\n c.verification_info\n FROM sl\n JOIN factory_deps fd ON fd.bytecode_hash = sl.value\n LEFT JOIN transactions txs ON txs.hash = sl.tx_hash\n LEFT JOIN contracts_verification_info c ON c.address = $2\n WHERE sl.value != $3\n " + }, + "e199251d38cb1f18993863f2e7920f21f7867ae1b48ffc905919de7bd98491de": { + "describe": { + "columns": [ + { + "name": "min?", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(miniblock_number) as \"min?\"\n FROM l2_to_l1_logs\n " + }, + "e2023b335b34b24cd0bd8d1d972aa1867a13c78504312fc718e801272c47b559": { + "describe": { + "columns": [ + { + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "data", + 
"ordinal": 1, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 2, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + true + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT factory_deps.bytecode, transactions.data, transactions.contract_address\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " + }, + "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + }, + { + "name": "eth_tx_id", + "ordinal": 1, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" + }, + "e33ee15019241ee9307cc447b3f92b54a8348abc8bba5568a3d43b6153d73e9b": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + } + ], + "nullable": [ + 
false, + false, + false, + false, + false, + false, + false, + false, + true, + true + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n ORDER BY number DESC \n LIMIT 1\n " + }, + "e42721cc22fbb2bda84f64057586f019cc5122c8e8723f2a9df778b2aa19fffc": { + "describe": { + "columns": [ + { + "name": "version", + "ordinal": 0, + "type_info": "Text" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT version FROM contract_verification_solc_versions ORDER by version" + }, + "e7f7e746aca1c17a8c88aba2db3f7cbd7c639c003580fc72e7b6af4c8ffba595": { + "describe": { + "columns": [ + { + "name": "bytecode_hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "bytecode", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n WHERE miniblock_number >= $1 AND miniblock_number <= $2" + }, + "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" + }, + "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" + }, + "ea1477a0c1509f989c0e2aa308cb59bd34b7ec841d5c6c242257ee8bde27ba83": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "scheduler_witness", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": 
"final_node_aggregations", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "status", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 4, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "attempts", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "aggregation_result_coords", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "scheduler_witness_blob_url", + "ordinal": 11, + "type_info": "Text" + }, + { + "name": "final_node_aggregations_blob_url", + "ordinal": 12, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 13, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + true, + false, + true, + true, + true, + false, + false, + false, + true, + true, + true, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int4", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " + }, + "ec4a3bc6a7a9c13ad11a4b71bed019a961f918a1d1376440c484cc42432c6c9c": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\"\n 
FROM prover_jobs\n WHERE status = 'successful' AND l1_batch_number = $1 AND aggregation_round = $2\n " + }, + "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + }, + "ee7bd820bf35c5c714092494c386eccff25457cff6dc00eb81d9809eaeb95670": { + "describe": { + "columns": [ + { + "name": "is_replaced!", + "ordinal": 0, + "type_info": "Bool" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Numeric", + "Bytea", + "Jsonb", + "Int4", + "Bytea", + "Numeric", + "Bytea", + "Bytea", + "Int8", + "Int4", + "Int4", + "Timestamp" + ] + } + }, + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, FALSE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n $19, now(), now()\n )\n ON CONFLICT\n (initiator_address, nonce)\n DO UPDATE\n SET hash=$1,\n signature=$4,\n gas_limit=$5,\n max_fee_per_gas=$6,\n max_priority_fee_per_gas=$7,\n gas_per_pubdata_limit=$8,\n input=$9,\n data=$10,\n tx_format=$11,\n contract_address=$12,\n value=$13,\n paymaster=$14,\n paymaster_input=$15,\n execution_info=jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n in_mempool=FALSE,\n received_at=$19,\n created_at=now(),\n updated_at=now(),\n error = NULL\n WHERE transactions.is_priority = FALSE 
AND transactions.miniblock_number IS NULL\n RETURNING (SELECT hash FROM transactions WHERE transactions.initiator_address = $2 AND transactions.nonce = $3) IS NOT NULL as \"is_replaced!\"\n " + }, + "efc83e42f5d0238b8996a5b311746527289a5a002ff659531a076680127e8eb4": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT hash FROM l1_batches WHERE number = $1" + }, + "f0308ffa4cc34a305150959ad1a30792c0b2bf493c6fa6183725b731a89c11e8": { + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null ], "parameters": { "Left": [ - "Int8" + "Bytea", + "Bytea" ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number != 0 AND commitment IS NOT NULL ORDER BY number LIMIT $1" + "query": "SELECT count(*)\n FROM storage\n WHERE\n address = $1 AND\n value != $2\n " }, - "f5abda9631a44b209b759c6800970d9669a8b5f0280e20ee9901f7c831ab4762": { + "f0c50c53c3883c1ae59263b40e55011760d64350eff411eef856ff301bb70579": { "describe": { "columns": [ { - "name": "value!", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l1_address!", + "name": "l1_batch_tx_index", "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "l2_address!", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "symbol!", - "ordinal": 3, - "type_info": "Varchar" - }, - { - "name": "name!", - "ordinal": 4, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 5, "type_info": "Int4" - }, - { - "name": "usd_price?", - "ordinal": 6, - "type_info": "Numeric" } ], "nullable": [ - true, - true, - true, - true, - true, true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", "Bytea" ] } }, - "query": "\n SELECT storage.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as 
\"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1)\n " + "query": "\n SELECT l1_batch_number, l1_batch_tx_index\n FROM transactions\n WHERE hash = $1\n " + }, + "f3f7ceb708cc072d66e8609d64ba99e6faa80bf58ff0ce0ef49e882af63522d4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n INSERT INTO node_aggregation_witness_jobs\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_artifacts', now(), now())\n " }, - "f7ac0e1a473a65b9318c55551c3a83b1316a2270fbafcc73e180bce65496bbe5": { + "f5abda9631a44b209b759c6800970d9669a8b5f0280e20ee9901f7c831ab4762": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "value!", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "scheduler_witness", + "name": "l1_address!", "ordinal": 1, "type_info": "Bytea" }, { - "name": "final_node_aggregations", + "name": "l2_address!", "ordinal": 2, "type_info": "Bytea" }, { - "name": "status", + "name": "symbol!", "ordinal": 3, - "type_info": "Text" + "type_info": "Varchar" }, { - "name": "processing_started_at", + "name": "name!", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Varchar" }, { - "name": "time_taken", + "name": "decimals!", "ordinal": 5, - "type_info": "Time" - }, - { - "name": "error", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "attempts", - "ordinal": 9, "type_info": "Int4" }, { - "name": "aggregation_result_coords", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "scheduler_witness_blob_url", - "ordinal": 11, - 
"type_info": "Text" - }, - { - "name": "final_node_aggregations_blob_url", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 13, - "type_info": "Bool" + "name": "usd_price?", + "ordinal": 6, + "type_info": "Numeric" } ], "nullable": [ - false, - false, - true, - false, true, true, true, - false, - false, - false, true, true, true, - false + true ], "parameters": { "Left": [ - "Interval", - "Int4" + "ByteaArray", + "Bytea", + "Bytea" ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE status = 'queued' \n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " + "query": "\n SELECT storage.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1)\n " }, "f93109d1cc02f5516b40a4a29082a46fd6fa66972bae710d08cfe6a1484b1616": { "describe": { @@ -9265,19 +9807,6 @@ }, "query": "SELECT trace FROM transaction_traces WHERE tx_hash = $1" }, - "fa2e61fb1ad09bb10260598f8daec2a22f5cb74a2a6fa3d6f0cda2d7f62d410e": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "\n UPDATE events SET tx_initiator_address = transactions.initiator_address\n FROM transactions WHERE transactions.hash = events.tx_hash AND events.miniblock_number BETWEEN $1 AND $2\n " - }, 
"fa33d51f8627376832b11bb174354e65e645ee2fb81564a97725518f47ae6f57": { "describe": { "columns": [ @@ -9424,5 +9953,105 @@ } }, "query": "\n UPDATE contract_verification_requests\n SET status = 'successful', updated_at = now()\n WHERE id = $1\n " + }, + "ff56f2104af03e232748debd5ec2c71495934682fa6ce9212e93084f1eb1087b": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "leaf_layer_subqueues", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "aggregation_outputs", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "number_of_leaf_circuits", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 5, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 6, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "attempts", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "leaf_layer_subqueues_blob_url", + "ordinal": 11, + "type_info": "Text" + }, + { + "name": "aggregation_outputs_blob_url", + "ordinal": 12, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 13, + "type_info": "Bool" + } + ], + "nullable": [ + false, + true, + true, + true, + false, + true, + true, + true, + false, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int4", + "Int8" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM node_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 
'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs.*\n " } } \ No newline at end of file diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 6ae5014e46d7..e89e24091736 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -10,13 +10,17 @@ use zksync_types::commitment::{BlockWithMetadata, CommitmentSerializable}; use zksync_types::MAX_GAS_PER_PUBDATA_BYTE; +use zksync_types::helpers::unix_timestamp_ms; use zksync_types::{ block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, commitment::BlockMetadata, L1BatchNumber, MiniblockNumber, H256, }; -use crate::{models::storage_block::StorageBlock, StorageProcessor}; +use crate::{ + models::storage_block::{StorageBlock, StorageMiniblockHeader}, + StorageProcessor, +}; #[derive(Debug)] pub struct BlocksDal<'a, 'c> { @@ -96,7 +100,7 @@ impl BlocksDal<'_, '_> { }) } - fn get_storage_block(&mut self, number: L1BatchNumber) -> Option { + pub fn get_storage_block(&mut self, number: L1BatchNumber) -> Option { async_std::task::block_on(async { sqlx::query_as!( StorageBlock, @@ -193,8 +197,9 @@ impl BlocksDal<'_, '_> { timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, + bootloader_code_hash, default_aa_code_hash, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, now(), now()) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now()) ", block.number.0 as i64, block.l1_tx_count as i32, @@ -213,7 +218,15 @@ impl BlocksDal<'_, '_> { 
used_contract_hashes, base_fee_per_gas, block.l1_gas_price as i64, - block.l2_fair_gas_price as i64 + block.l2_fair_gas_price as i64, + block + .base_system_contracts_hashes + .bootloader + .as_bytes(), + block + .base_system_contracts_hashes + .default_aa + .as_bytes() ) .execute(self.storage.conn()) .await @@ -224,15 +237,16 @@ impl BlocksDal<'_, '_> { pub fn insert_miniblock(&mut self, miniblock_header: MiniblockHeader) { let base_fee_per_gas = BigDecimal::from_u64(miniblock_header.base_fee_per_gas) .expect("base_fee_per_gas should fit in u64"); - async_std::task::block_on(async { sqlx::query!( " INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, - base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, created_at, updated_at + base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, + bootloader_code_hash, default_aa_code_hash, + created_at, updated_at ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, now(), now()) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now()) ", miniblock_header.number.0 as i64, miniblock_header.timestamp as i64, @@ -242,7 +256,15 @@ impl BlocksDal<'_, '_> { base_fee_per_gas, miniblock_header.l1_gas_price as i64, miniblock_header.l2_fair_gas_price as i64, - MAX_GAS_PER_PUBDATA_BYTE as i64 + MAX_GAS_PER_PUBDATA_BYTE as i64, + miniblock_header + .base_system_contracts_hashes + .bootloader + .as_bytes(), + miniblock_header + .base_system_contracts_hashes + .default_aa + .as_bytes(), ) .execute(self.storage.conn()) .await @@ -250,15 +272,37 @@ impl BlocksDal<'_, '_> { }) } + pub fn get_last_sealed_miniblock_header(&mut self) -> Option { + async_std::task::block_on(async { + sqlx::query_as!( + StorageMiniblockHeader, + " + SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, + base_fee_per_gas, l1_gas_price, l2_fair_gas_price, + bootloader_code_hash, default_aa_code_hash + FROM miniblocks + ORDER BY number DESC + LIMIT 1 + ", + ) + 
.fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| row.into()) + }) + } + pub fn get_miniblock_header( &mut self, miniblock_number: MiniblockNumber, ) -> Option { async_std::task::block_on(async { - sqlx::query!( + sqlx::query_as!( + StorageMiniblockHeader, " SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, - base_fee_per_gas, l1_gas_price, l2_fair_gas_price + base_fee_per_gas, l1_gas_price, l2_fair_gas_price, + bootloader_code_hash, default_aa_code_hash FROM miniblocks WHERE number = $1 ", @@ -267,19 +311,7 @@ impl BlocksDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap() - .map(|row| MiniblockHeader { - number: MiniblockNumber(row.number as u32), - timestamp: row.timestamp as u64, - hash: H256::from_slice(&row.hash), - l1_tx_count: row.l1_tx_count as u16, - l2_tx_count: row.l2_tx_count as u16, - base_fee_per_gas: row - .base_fee_per_gas - .to_u64() - .expect("base_fee_per_gas should fit in u64"), - l1_gas_price: row.l1_gas_price as u64, - l2_fair_gas_price: row.l2_fair_gas_price as u64, - }) + .map(|row| row.into()) }) } @@ -349,24 +381,22 @@ impl BlocksDal<'_, '_> { let update_result = sqlx::query!( " UPDATE l1_batches SET - hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, - compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7, - l2_l1_merkle_root = $8, zkporter_is_available = $9, - bootloader_code_hash = $10, parent_hash = $11, rollup_last_leaf_index = $12, - aux_data_hash = $13, pass_through_data_hash = $14, meta_parameters_hash = $15, + hash = $1, merkle_root_hash = $2, commitment = $3, + compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, + l2_l1_merkle_root = $7, zkporter_is_available = $8, + parent_hash = $9, rollup_last_leaf_index = $10, + aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, updated_at = NOW() - WHERE number = $16 AND hash IS NULL + WHERE number = $14 
AND hash IS NULL ", block_metadata.root_hash.as_bytes(), block_metadata.merkle_root_hash.as_bytes(), block_metadata.commitment.as_bytes(), - block_metadata.block_meta_params.default_aa_code_hash.as_bytes(), block_metadata.repeated_writes_compressed, block_metadata.initial_writes_compressed, block_metadata.l2_l1_messages_compressed, block_metadata.l2_l1_merkle_root.as_bytes(), block_metadata.block_meta_params.zkporter_is_available, - block_metadata.block_meta_params.bootloader_code_hash.as_bytes(), previous_root_hash.0.to_vec(), block_metadata.rollup_last_leaf_index as i64, block_metadata.aux_data_hash.as_bytes(), @@ -583,18 +613,44 @@ impl BlocksDal<'_, '_> { }) } - pub fn get_ready_for_execute_blocks(&mut self, limit: usize) -> Vec { + pub fn get_ready_for_execute_blocks( + &mut self, + limit: usize, + l1_batch_min_age_before_execute_seconds: Option, + ) -> Vec { async_std::task::block_on(async { - let l1_batches = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ - WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ - ORDER BY number LIMIT $1", - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); + let l1_batches = match l1_batch_min_age_before_execute_seconds { + None => sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches \ + WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ + ORDER BY number LIMIT $1", + limit as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(), + Some(l1_batch_min_age_before_execute_seconds) => { + let max_l1_batch_timestamp_seconds = + unix_timestamp_ms() / 1000 - l1_batch_min_age_before_execute_seconds; + + sqlx::query_as!( + StorageBlock, + "SELECT l1_batches.* FROM l1_batches \ + JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) \ + WHERE commit_tx.confirmed_at IS NOT NULL \ + AND eth_prove_tx_id IS NOT NULL \ + AND eth_execute_tx_id IS NULL \ + AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 \ + ORDER BY 
number LIMIT $2", + max_l1_batch_timestamp_seconds as i32, + limit as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + } + }; l1_batches .into_iter() .map(|block| { @@ -605,16 +661,24 @@ impl BlocksDal<'_, '_> { }) } - pub fn get_ready_for_commit_blocks(&mut self, limit: usize) -> Vec { + pub fn get_ready_for_commit_blocks( + &mut self, + limit: usize, + bootloader_hash: H256, + default_aa_hash: H256, + ) -> Vec { async_std::task::block_on(async { let l1_batches = sqlx::query_as!( StorageBlock, - "SELECT * FROM l1_batches \ - WHERE eth_commit_tx_id IS NULL \ - AND number != 0 \ - AND commitment IS NOT NULL \ - ORDER BY number LIMIT $1", - limit as i32 + "SELECT * FROM l1_batches + WHERE eth_commit_tx_id IS NULL + AND number != 0 + AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 + AND commitment IS NOT NULL + ORDER BY number LIMIT $3", + bootloader_hash.as_bytes(), + default_aa_hash.as_bytes(), + limit as i64, ) .fetch_all(self.storage.conn()) .await @@ -828,12 +892,30 @@ impl BlocksDal<'_, '_> { }) } + pub fn get_last_l1_batch_number_with_witness_inputs(&mut self) -> L1BatchNumber { + async_std::task::block_on(async { + sqlx::query!( + r#" + SELECT MAX(l1_batch_number) FROM witness_inputs + WHERE merkel_tree_paths_blob_url IS NOT NULL + "#, + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .max + .map(|l1_batch_number| L1BatchNumber(l1_batch_number as u32)) + .unwrap_or_default() + }) + } + pub fn get_l1_batches_with_blobs_in_db(&mut self, limit: u8) -> Vec { async_std::task::block_on(async { let l1_batches = sqlx::query!( r#" SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 + ORDER BY l1_batch_number DESC LIMIT $1; "#, limit as i32 @@ -878,7 +960,7 @@ impl BlocksDal<'_, '_> { SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs WHERE status='successful' AND is_blob_cleaned=FALSE AND merkel_tree_paths_blob_url is NOT NULL - AND updated_at < NOW() - INTERVAL '2 days' + AND 
updated_at < NOW() - INTERVAL '30 days' LIMIT $1; "#, limit as i32 diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 8a005b8a71fd..28b8963053ab 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -1,9 +1,12 @@ -use crate::models::storage_eth_tx::{StorageEthTx, StorageTxHistory, StorageTxHistoryToSend}; +use crate::models::storage_eth_tx::{ + L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, +}; use crate::StorageProcessor; +use sqlx::Row; use std::convert::TryFrom; use zksync_types::aggregated_operations::AggregatedActionType; use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend}; -use zksync_types::{Address, H256, U256}; +use zksync_types::{Address, L1BatchNumber, H256, U256}; #[derive(Debug)] pub struct EthSenderDal<'a, 'c> { @@ -26,6 +29,38 @@ impl EthSenderDal<'_, '_> { }) } + pub fn get_eth_l1_batches(&mut self) -> L1BatchEthSenderStats { + async_std::task::block_on(async { + let mut stats = L1BatchEthSenderStats::default(); + for tx_type in ["execute_tx", "commit_tx", "prove_tx"] { + let records= sqlx::query(&format!( + "SELECT MAX(number) as number, txs.confirmed_at IS NOT NULL as confirmed FROM l1_batches + LEFT JOIN eth_txs_history as txs ON (l1_batches.eth_{}_id = txs.eth_tx_id) + GROUP BY confirmed", + tx_type + )) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + for record in records { + let batch_number = L1BatchNumber(record.get::("number") as u32); + let aggregation_action = match tx_type { + "execute_tx" => AggregatedActionType::ExecuteBlocks, + "commit_tx" => AggregatedActionType::CommitBlocks, + "prove_tx" => AggregatedActionType::PublishProofBlocksOnchain, + _ => unreachable!(), + }; + if record.get::("confirmed") { + stats.mined.push((aggregation_action, batch_number)); + } else { + stats.saved.push((aggregation_action, batch_number)); + } + } + } + stats + }) + } + pub fn get_eth_tx(&mut self, eth_tx_id: u32) -> 
Option { async_std::task::block_on(async { sqlx::query_as!( diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 59100c7f0ff3..11c0f36a50c7 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -97,26 +97,6 @@ impl EventsDal<'_, '_> { }) } - pub fn set_tx_initiator_address( - &mut self, - from_block_number: MiniblockNumber, - to_block_number: MiniblockNumber, - ) { - async_std::task::block_on(async { - sqlx::query!( - " - UPDATE events SET tx_initiator_address = transactions.initiator_address - FROM transactions WHERE transactions.hash = events.tx_hash AND events.miniblock_number BETWEEN $1 AND $2 - ", - from_block_number.0 as i64, - to_block_number.0 as i64, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - pub fn save_l2_to_l1_logs( &mut self, block_number: MiniblockNumber, diff --git a/core/lib/dal/src/explorer/contract_verification_dal.rs b/core/lib/dal/src/explorer/contract_verification_dal.rs index b0635ae268b7..f1bb69bd19b8 100644 --- a/core/lib/dal/src/explorer/contract_verification_dal.rs +++ b/core/lib/dal/src/explorer/contract_verification_dal.rs @@ -49,11 +49,12 @@ impl ContractVerificationDal<'_, '_> { compiler_solc_version, optimization_used, constructor_arguments, + is_system, status, created_at, updated_at ) - VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now()) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', now(), now()) RETURNING id ", query.contract_address.as_bytes(), @@ -63,6 +64,7 @@ impl ContractVerificationDal<'_, '_> { query.compiler_solc_version, query.optimization_used, query.constructor_arguments.0, + query.is_system, ) .fetch_one(self.storage.conn()) .await @@ -111,6 +113,7 @@ impl ContractVerificationDal<'_, '_> { compiler_solc_version: row.compiler_solc_version, optimization_used: row.optimization_used, constructor_arguments: row.constructor_arguments.into(), + is_system: row.is_system, }, }); Ok(result) diff --git 
a/core/lib/dal/src/explorer/explorer_blocks_dal.rs b/core/lib/dal/src/explorer/explorer_blocks_dal.rs index 27081a867452..0de5365c4c48 100644 --- a/core/lib/dal/src/explorer/explorer_blocks_dal.rs +++ b/core/lib/dal/src/explorer/explorer_blocks_dal.rs @@ -1,7 +1,15 @@ -use zksync_types::explorer_api::{BlockDetails, BlockPageItem, BlocksQuery, PaginationDirection}; -use zksync_types::MiniblockNumber; +use std::time::Instant; -use crate::models::storage_block::{block_page_item_from_storage, StorageBlockDetails}; +use zksync_types::explorer_api::{ + BlockDetails, BlockPageItem, BlocksQuery, L1BatchDetails, L1BatchPageItem, L1BatchesQuery, + PaginationDirection, +}; +use zksync_types::{L1BatchNumber, MiniblockNumber}; + +use crate::models::storage_block::{ + block_page_item_from_storage, l1_batch_page_item_from_storage, StorageBlockDetails, + StorageL1BatchDetails, +}; use crate::SqlxError; use crate::StorageProcessor; @@ -57,10 +65,12 @@ impl ExplorerBlocksDal<'_, '_> { block_number: MiniblockNumber, ) -> Result, SqlxError> { async_std::task::block_on(async { + let started_at = Instant::now(); let block_details: Option = sqlx::query_as!( StorageBlockDetails, r#" SELECT miniblocks.number, + COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as "l1_batch_number!", miniblocks.timestamp, miniblocks.l1_tx_count, miniblocks.l2_tx_count, @@ -70,7 +80,11 @@ impl ExplorerBlocksDal<'_, '_> { prove_tx.tx_hash as "prove_tx_hash?", prove_tx.confirmed_at as "proven_at?", execute_tx.tx_hash as "execute_tx_hash?", - execute_tx.confirmed_at as "executed_at?" 
+ execute_tx.confirmed_at as "executed_at?", + miniblocks.l1_gas_price, + miniblocks.l2_fair_gas_price, + miniblocks.bootloader_code_hash, + miniblocks.default_aa_code_hash FROM miniblocks LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) @@ -82,7 +96,88 @@ impl ExplorerBlocksDal<'_, '_> { ) .fetch_optional(self.storage.conn()) .await?; + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_block_details"); Ok(block_details.map(BlockDetails::from)) }) } + + pub fn get_l1_batches_page( + &mut self, + query: L1BatchesQuery, + last_verified: L1BatchNumber, + ) -> Result, SqlxError> { + async_std::task::block_on(async { + let (cmp_sign, order_str) = match query.pagination.direction { + PaginationDirection::Older => ("<", "DESC"), + PaginationDirection::Newer => (">", "ASC"), + }; + let cmp_str = if query.from.is_some() { + format!("WHERE l1_batches.number {} $3", cmp_sign) + } else { + "".to_string() + }; + let sql_query_str = format!( + " + SELECT number, l1_tx_count, l2_tx_count, hash, timestamp FROM l1_batches + {} + ORDER BY l1_batches.number {} + LIMIT $1 + OFFSET $2 + ", + cmp_str, order_str + ); + + let mut sql_query = sqlx::query_as(&sql_query_str).bind(query.pagination.limit as i32); + sql_query = sql_query.bind(query.pagination.offset as i32); + if let Some(from) = query.from { + sql_query = sql_query.bind(from.0 as i64); + } + let result = sql_query + .fetch_all(self.storage.conn()) + .await? 
+ .into_iter() + .map(|row| l1_batch_page_item_from_storage(row, last_verified)) + .collect(); + Ok(result) + }) + } + + pub fn get_l1_batch_details( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Result, SqlxError> { + async_std::task::block_on(async { + let started_at = Instant::now(); + let l1_batch_details: Option = sqlx::query_as!( + StorageL1BatchDetails, + r#" + SELECT l1_batches.number, + l1_batches.timestamp, + l1_batches.l1_tx_count, + l1_batches.l2_tx_count, + l1_batches.hash as "root_hash?", + commit_tx.tx_hash as "commit_tx_hash?", + commit_tx.confirmed_at as "committed_at?", + prove_tx.tx_hash as "prove_tx_hash?", + prove_tx.confirmed_at as "proven_at?", + execute_tx.tx_hash as "execute_tx_hash?", + execute_tx.confirmed_at as "executed_at?", + l1_batches.l1_gas_price, + l1_batches.l2_fair_gas_price, + l1_batches.bootloader_code_hash, + l1_batches.default_aa_code_hash + FROM l1_batches + LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) + LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) + WHERE l1_batches.number = $1 + "#, + l1_batch_number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await?; + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_l1_batch_details"); + Ok(l1_batch_details.map(L1BatchDetails::from)) + }) + } } diff --git a/core/lib/dal/src/explorer/explorer_transactions_dal.rs b/core/lib/dal/src/explorer/explorer_transactions_dal.rs index 96a9e31fc6cc..bbe563ca28af 100644 --- a/core/lib/dal/src/explorer/explorer_transactions_dal.rs +++ b/core/lib/dal/src/explorer/explorer_transactions_dal.rs @@ -12,8 +12,8 @@ use zksync_types::explorer_api::{ TransactionsResponse, TxPosition, }; use 
zksync_types::{ - tokens::ETHEREUM_ADDRESS, tx::Execute, Address, MiniblockNumber, H256, L2_ETH_TOKEN_ADDRESS, - U256, U64, + tokens::ETHEREUM_ADDRESS, tx::Execute, Address, L1BatchNumber, MiniblockNumber, H256, + L2_ETH_TOKEN_ADDRESS, U256, U64, }; use crate::models::storage_event::StorageWeb3Log; @@ -112,6 +112,7 @@ impl ExplorerTransactionsDal<'_, '_> { &mut self, from_tx_location: Option, block_number: Option, + l1_batch_number: Option, contract_address: Option
, pagination: PaginationQuery, max_total: usize, @@ -145,6 +146,9 @@ impl ExplorerTransactionsDal<'_, '_> { if let Some(number) = block_number { filters.push(format!("transactions.miniblock_number = {}", number.0)); } + if let Some(number) = l1_batch_number { + filters.push(format!("transactions.l1_batch_number = {}", number.0)); + } let filters: String = if !filters.is_empty() { format!("WHERE {}", filters.join(" AND ")) } else { @@ -337,12 +341,17 @@ impl ExplorerTransactionsDal<'_, '_> { FROM events WHERE ( - topic2 = $1 - OR - topic3 = $1 + ( + ( + topic2 = $1 + OR + topic3 = $1 + ) + AND topic1 = $2 + AND (address IN (SELECT l2_address FROM tokens) OR address = $3) + ) + OR events.tx_initiator_address = $4 ) - AND topic1 = $2 - AND (address IN (SELECT l2_address FROM tokens) OR address = $3) {1} ) AS h WHERE prev_hash IS NULL OR tx_hash != prev_hash @@ -355,7 +364,8 @@ impl ExplorerTransactionsDal<'_, '_> { let sql_count_query = sqlx::query(&sql_count_query_str) .bind(padded_address) .bind(ERC20_TRANSFER_TOPIC.as_bytes().to_vec()) - .bind(L2_ETH_TOKEN_ADDRESS.as_bytes().to_vec()); + .bind(L2_ETH_TOKEN_ADDRESS.as_bytes().to_vec()) + .bind(account_address.as_bytes().to_vec()); let total = sql_count_query .fetch_one(self.storage.conn()) .await? 
diff --git a/core/lib/dal/src/gpu_prover_queue_dal.rs b/core/lib/dal/src/gpu_prover_queue_dal.rs index 3002a24fb523..a4fc47aed885 100644 --- a/core/lib/dal/src/gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/gpu_prover_queue_dal.rs @@ -32,6 +32,7 @@ impl GpuProverQueueDal<'_, '_> { &mut self, processing_timeout: Duration, specialized_prover_group_id: u8, + region: String, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -45,6 +46,7 @@ impl GpuProverQueueDal<'_, '_> { SELECT instance_host, instance_port FROM gpu_prover_queue WHERE specialized_prover_group_id=$2 + AND region=$3 AND ( instance_status = 'available' OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval) @@ -57,7 +59,8 @@ impl GpuProverQueueDal<'_, '_> { RETURNING gpu_prover_queue.* ", &processing_timeout, - specialized_prover_group_id as i16 + specialized_prover_group_id as i16, + region ) .fetch_optional(self.storage.conn()) .await @@ -76,18 +79,20 @@ impl GpuProverQueueDal<'_, '_> { address: SocketAddress, queue_capacity: usize, specialized_prover_group_id: u8, + region: String, ) { async_std::task::block_on(async { sqlx::query!( " - INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, created_at, updated_at) - VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, now(), now()) - ON CONFLICT(instance_host, instance_port) - DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, updated_at=now()", + INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, created_at, updated_at) + VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, now(), now()) + ON CONFLICT(instance_host, instance_port, region) + DO UPDATE SET instance_status='available', 
queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, updated_at=now()", format!("{}",address.host), address.port as i32, queue_capacity as i32, - specialized_prover_group_id as i16) + specialized_prover_group_id as i16, + region) .execute(self.storage.conn()) .await .unwrap(); diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 0a0a4d12bb11..d94bde4d073e 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -3,13 +3,15 @@ use sqlx::postgres::PgArguments; use std::convert::TryInto; use std::str::FromStr; use thiserror::Error; -use zksync_types::explorer_api::BlockDetails; +use zksync_types::explorer_api::{BlockDetails, L1BatchDetails, L1BatchPageItem}; use sqlx::query::Query; use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; use sqlx::Postgres; +use zksync_contracts::BaseSystemContractsHashes; use zksync_types::api::{self, BlockId}; +use zksync_types::block::MiniblockHeader; use zksync_types::commitment::{BlockMetaParameters, BlockMetadata}; use zksync_types::{ block::L1BatchHeader, @@ -118,6 +120,16 @@ impl From for L1BatchHeader { .base_fee_per_gas .to_u64() .expect("base_fee_per_gas should fit in u64"), + base_system_contracts_hashes: BaseSystemContractsHashes { + bootloader: block + .bootloader_code_hash + .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) + .expect("Should be not none"), + default_aa: block + .default_aa_code_hash + .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) + .expect("Should be not none"), + }, l1_gas_price: block.l1_gas_price as u64, l2_fair_gas_price: block.l2_fair_gas_price as u64, } @@ -211,6 +223,17 @@ pub struct StorageBlockPageItem { pub timestamp: i64, } +// At the moment it has the same fields as `StorageBlockPageItem` +// but there are no guarantees it won't change in the future. 
+#[derive(Debug, Clone, sqlx::FromRow)] +pub struct StorageL1BatchPageItem { + pub number: i64, + pub l1_tx_count: i32, + pub l2_tx_count: i32, + pub hash: Option>, + pub timestamp: i64, +} + pub fn block_page_item_from_storage( storage: StorageBlockPageItem, last_verified: MiniblockNumber, @@ -230,6 +253,25 @@ pub fn block_page_item_from_storage( } } +pub fn l1_batch_page_item_from_storage( + storage: StorageL1BatchPageItem, + last_verified: L1BatchNumber, +) -> L1BatchPageItem { + let status = if storage.number > last_verified.0 as i64 { + BlockStatus::Sealed + } else { + BlockStatus::Verified + }; + L1BatchPageItem { + number: L1BatchNumber(storage.number as u32), + l1_tx_count: storage.l1_tx_count as usize, + l2_tx_count: storage.l2_tx_count as usize, + root_hash: storage.hash.map(|hash| H256::from_slice(&hash)), + status, + timestamp: storage.timestamp as u64, + } +} + /// Returns block_number SQL statement and the next argument index that can be used pub fn web3_block_number_to_sql(block_number: api::BlockNumber, arg_index: u8) -> (String, u8) { match block_number { @@ -295,6 +337,7 @@ pub fn bind_block_where_sql_params( #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageBlockDetails { pub number: i64, + pub l1_batch_number: i64, pub timestamp: i64, pub l1_tx_count: i32, pub l2_tx_count: i32, @@ -305,6 +348,10 @@ pub struct StorageBlockDetails { pub proven_at: Option, pub execute_tx_hash: Option, pub executed_at: Option, + pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch + pub l2_fair_gas_price: i64, // L2 gas price assumed in the corresponding batch + pub bootloader_code_hash: Option>, + pub default_aa_code_hash: Option>, } impl From for BlockDetails { @@ -318,6 +365,7 @@ impl From for BlockDetails { }; BlockDetails { number: MiniblockNumber(storage_block_details.number as u32), + l1_batch_number: L1BatchNumber(storage_block_details.l1_batch_number as u32), timestamp: storage_block_details.timestamp as u64, l1_tx_count: 
storage_block_details.l1_tx_count as usize, l2_tx_count: storage_block_details.l2_tx_count as usize, @@ -347,6 +395,131 @@ impl From for BlockDetails { executed_at: storage_block_details .executed_at .map(|executed_at| DateTime::::from_utc(executed_at, Utc)), + l1_gas_price: storage_block_details.l1_gas_price as u64, + l2_fair_gas_price: storage_block_details.l2_fair_gas_price as u64, + base_system_contracts_hashes: BaseSystemContractsHashes { + bootloader: storage_block_details + .bootloader_code_hash + .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) + .expect("Should be not none"), + default_aa: storage_block_details + .default_aa_code_hash + .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) + .expect("Should be not none"), + }, + } + } +} + +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct StorageL1BatchDetails { + pub number: i64, + pub timestamp: i64, + pub l1_tx_count: i32, + pub l2_tx_count: i32, + pub root_hash: Option>, + pub commit_tx_hash: Option, + pub committed_at: Option, + pub prove_tx_hash: Option, + pub proven_at: Option, + pub execute_tx_hash: Option, + pub executed_at: Option, + pub l1_gas_price: i64, + pub l2_fair_gas_price: i64, + pub bootloader_code_hash: Option>, + pub default_aa_code_hash: Option>, +} + +impl From for L1BatchDetails { + fn from(storage_l1_batch_details: StorageL1BatchDetails) -> Self { + let status = if storage_l1_batch_details.number == 0 + || storage_l1_batch_details.execute_tx_hash.is_some() + { + BlockStatus::Verified + } else { + BlockStatus::Sealed + }; + L1BatchDetails { + number: L1BatchNumber(storage_l1_batch_details.number as u32), + timestamp: storage_l1_batch_details.timestamp as u64, + l1_tx_count: storage_l1_batch_details.l1_tx_count as usize, + l2_tx_count: storage_l1_batch_details.l2_tx_count as usize, + status, + root_hash: storage_l1_batch_details + .root_hash + .as_deref() + .map(H256::from_slice), + commit_tx_hash: storage_l1_batch_details + .commit_tx_hash + 
.as_deref() + .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")), + committed_at: storage_l1_batch_details + .committed_at + .map(|committed_at| DateTime::::from_utc(committed_at, Utc)), + prove_tx_hash: storage_l1_batch_details + .prove_tx_hash + .as_deref() + .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")), + proven_at: storage_l1_batch_details + .proven_at + .map(|proven_at| DateTime::::from_utc(proven_at, Utc)), + execute_tx_hash: storage_l1_batch_details + .execute_tx_hash + .as_deref() + .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")), + executed_at: storage_l1_batch_details + .executed_at + .map(|executed_at| DateTime::::from_utc(executed_at, Utc)), + l1_gas_price: storage_l1_batch_details.l1_gas_price as u64, + l2_fair_gas_price: storage_l1_batch_details.l2_fair_gas_price as u64, + base_system_contracts_hashes: BaseSystemContractsHashes { + bootloader: storage_l1_batch_details + .bootloader_code_hash + .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) + .expect("Should be not none"), + default_aa: storage_l1_batch_details + .default_aa_code_hash + .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) + .expect("Should be not none"), + }, + } + } +} + +pub struct StorageMiniblockHeader { + pub number: i64, + pub timestamp: i64, + pub hash: Vec, + pub l1_tx_count: i32, + pub l2_tx_count: i32, + pub base_fee_per_gas: BigDecimal, + pub l1_gas_price: i64, // L1 gas price assumed in the corresponding batch + pub l2_fair_gas_price: i64, // L2 gas price assumed in the corresponding batch + pub bootloader_code_hash: Option>, + pub default_aa_code_hash: Option>, +} + +impl From for MiniblockHeader { + fn from(row: StorageMiniblockHeader) -> Self { + MiniblockHeader { + number: MiniblockNumber(row.number as u32), + timestamp: row.timestamp as u64, + hash: H256::from_slice(&row.hash), + l1_tx_count: row.l1_tx_count as u16, + l2_tx_count: row.l2_tx_count as u16, + 
base_fee_per_gas: row.base_fee_per_gas.to_u64().unwrap(), + l1_gas_price: row.l1_gas_price as u64, + l2_fair_gas_price: row.l2_fair_gas_price as u64, + base_system_contracts_hashes: BaseSystemContractsHashes { + bootloader: row + .bootloader_code_hash + .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) + .expect("Should be not none"), + default_aa: row + .default_aa_code_hash + .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) + .expect("Should be not none"), + }, } } } diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs index 21fc9c559730..92c76cce393a 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -2,7 +2,7 @@ use sqlx::types::chrono::NaiveDateTime; use std::str::FromStr; use zksync_types::aggregated_operations::AggregatedActionType; use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend}; -use zksync_types::{Address, H256}; +use zksync_types::{Address, L1BatchNumber, H256}; #[derive(Debug, Clone)] pub struct StorageEthTx { @@ -20,6 +20,12 @@ pub struct StorageEthTx { pub sent_at_block: Option, } +#[derive(Debug, Default)] +pub struct L1BatchEthSenderStats { + pub saved: Vec<(AggregatedActionType, L1BatchNumber)>, + pub mined: Vec<(AggregatedActionType, L1BatchNumber)>, +} + #[derive(Clone, Debug)] pub struct StorageTxHistoryToSend { pub id: i32, diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index f7ddeb2a463b..16ea89f4bf5b 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -330,6 +330,10 @@ impl From for Transaction { sender: Address::from_slice(&tx.initiator_address), serial_id: PriorityOpId(tx.priority_op_id.unwrap() as u64), gas_limit, + max_fee_per_gas: tx + .max_fee_per_gas + .map(bigdecimal_to_u256) + .unwrap_or_default(), to_mint, refund_recipient, // Using 1 for old 
transactions that did not have the necessary field stored diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs index 588bcf7439d4..1c18bdc00b19 100644 --- a/core/lib/dal/src/prover_dal.rs +++ b/core/lib/dal/src/prover_dal.rs @@ -1,8 +1,7 @@ use std::collections::HashMap; -use std::convert::TryFrom; +use std::convert::{TryFrom, TryInto}; use std::ops::Range; use std::time::{Duration, Instant}; - use zksync_object_store::gcs_utils::prover_circuit_input_blob_url; use zksync_types::aggregated_operations::BlockProofForL1; use zksync_types::proofs::{ @@ -65,6 +64,28 @@ impl ProverDal<'_, '_> { }) } + pub fn get_proven_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> { + async_std::task::block_on(async { + sqlx::query!( + r#"SELECT MAX(l1_batch_number) as "l1_batch_number!", aggregation_round FROM prover_jobs + WHERE status='successful' + GROUP BY aggregation_round + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|record| { + ( + L1BatchNumber(record.l1_batch_number as u32), + record.aggregation_round.try_into().unwrap(), + ) + }) + .collect() + }) + } + pub fn get_next_prover_job_by_circuit_types( &mut self, processing_timeout: Duration, @@ -85,6 +106,7 @@ impl ProverDal<'_, '_> { AND ( status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) + OR (status = 'in_gpu_proof' AND processing_started_at < now() - $1::interval) OR (status = 'failed' AND attempts < $2) ) ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC @@ -258,6 +280,39 @@ impl ProverDal<'_, '_> { }) } + pub fn get_prover_jobs_stats_per_circuit(&mut self) -> HashMap { + async_std::task::block_on(async { + sqlx::query!( + r#" + SELECT COUNT(*) as "count!", circuit_type as "circuit_type!", status as "status!" 
+ FROM prover_jobs + GROUP BY circuit_type, status + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.circuit_type, row.status, row.count as usize)) + .fold(HashMap::new(), |mut acc, (circuit_type, status, value)| { + let stats = acc.entry(circuit_type).or_insert(JobCountStatistics { + queued: 0, + in_progress: 0, + failed: 0, + successful: 0, + }); + match status.as_ref() { + "queued" => stats.queued = value, + "in_progress" => stats.in_progress = value, + "failed" => stats.failed = value, + "successful" => stats.successful = value, + _ => (), + } + acc + }) + }) + } + pub fn get_prover_jobs_stats(&mut self) -> JobCountStatistics { async_std::task::block_on(async { let mut results: HashMap = sqlx::query!( @@ -468,23 +523,6 @@ impl ProverDal<'_, '_> { }) } - pub fn get_l1_batches_with_blobs_in_db(&mut self, limit: u8) -> Vec { - async_std::task::block_on(async { - let job_ids = sqlx::query!( - r#" - SELECT id FROM prover_jobs - WHERE length(prover_input) <> 0 - LIMIT $1; - "#, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - job_ids.into_iter().map(|row| row.id).collect() - }) - } - pub fn get_circuit_input_blob_urls_to_be_cleaned(&mut self, limit: u8) -> Vec<(i64, String)> { async_std::task::block_on(async { let job_ids = sqlx::query!( @@ -492,7 +530,7 @@ impl ProverDal<'_, '_> { SELECT id, circuit_input_blob_url FROM prover_jobs WHERE status='successful' AND is_blob_cleaned=FALSE AND circuit_input_blob_url is NOT NULL - AND updated_at < NOW() - INTERVAL '2 days' + AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1; "#, limit as i32 @@ -523,22 +561,6 @@ impl ProverDal<'_, '_> { }) } - pub fn purge_blobs_from_db(&mut self, job_ids: Vec) { - async_std::task::block_on(async { - sqlx::query!( - r#" - UPDATE prover_jobs - SET prover_input='' - WHERE id = ANY($1); - "#, - &job_ids[..] 
- ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - pub fn update_status(&mut self, id: u32, status: &str) { async_std::task::block_on(async { sqlx::query!( diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index a38e793f4830..34716eca2f7f 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -2,12 +2,13 @@ use crate::models::storage_contract::StorageContractSource; use crate::StorageProcessor; use std::collections::{HashMap, HashSet}; use std::time::Instant; +use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_types::{ vm_trace::ContractSourceDebugInfo, Address, MiniblockNumber, StorageKey, StorageLog, StorageValue, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, }; -use zksync_utils::{bytes_to_chunks, h256_to_account_address}; +use zksync_utils::{bytes_to_be_words, bytes_to_chunks, h256_to_account_address}; #[derive(Debug)] pub struct StorageDal<'a, 'c> { @@ -58,6 +59,35 @@ impl StorageDal<'_, '_> { }) } + pub fn get_base_system_contracts( + &mut self, + bootloader_hash: H256, + default_aa_hash: H256, + ) -> BaseSystemContracts { + async_std::task::block_on(async { + let bootloader_bytecode = self + .get_factory_dep(bootloader_hash) + .expect("Bootloader code should be presented in the database"); + let bootloader_code = SystemContractCode { + code: bytes_to_be_words(bootloader_bytecode), + hash: bootloader_hash, + }; + + let default_aa_bytecode = self + .get_factory_dep(default_aa_hash) + .expect("Default account code should be presented in the database"); + + let default_aa_code = SystemContractCode { + code: bytes_to_be_words(default_aa_bytecode), + hash: default_aa_hash, + }; + BaseSystemContracts { + bootloader: bootloader_code, + default_aa: default_aa_code, + } + }) + } + pub fn get_factory_deps(&mut self, hashes: &HashSet) -> HashMap> { let hashes_as_vec_u8: Vec> = hashes.iter().map(|hash| hash.0.to_vec()).collect(); 
diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 01a514d401b8..b4f05c540f3f 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,8 +1,8 @@ use crate::StorageProcessor; use sqlx::types::chrono::Utc; use std::collections::{HashMap, HashSet}; -use vm::zk_evm::ethereum_types::H256; -use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLogQuery}; +use vm::zk_evm::aux_structures::LogQuery; +use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256}; use zksync_utils::u256_to_h256; #[derive(Debug)] @@ -11,7 +11,7 @@ pub struct StorageLogsDedupDal<'a, 'c> { } impl StorageLogsDedupDal<'_, '_> { - pub fn insert_storage_logs(&mut self, block_number: L1BatchNumber, logs: &[StorageLogQuery]) { + pub fn insert_storage_logs(&mut self, block_number: L1BatchNumber, logs: &[LogQuery]) { async_std::task::block_on(async { let mut copy = self .storage @@ -25,8 +25,7 @@ impl StorageLogsDedupDal<'_, '_> { let mut bytes: Vec = Vec::new(); let now = Utc::now().naive_utc().to_string(); - for (operation_number, log_query) in logs.iter().enumerate() { - let log = &log_query.log_query; + for (operation_number, log) in logs.iter().enumerate() { let hashed_key_str = format!( "\\\\x{}", hex::encode(StorageKey::raw_hashed_key( @@ -74,7 +73,7 @@ impl StorageLogsDedupDal<'_, '_> { pub fn insert_protective_reads( &mut self, l1_batch_number: L1BatchNumber, - read_logs: &[StorageLogQuery], + read_logs: &[LogQuery], ) { async_std::task::block_on(async { let mut copy = self @@ -89,8 +88,7 @@ impl StorageLogsDedupDal<'_, '_> { let mut bytes: Vec = Vec::new(); let now = Utc::now().naive_utc().to_string(); - for log_query in read_logs.iter() { - let log = &log_query.log_query; + for log in read_logs.iter() { let address_str = format!("\\\\x{}", hex::encode(log.address.0)); let key_str = format!("\\\\x{}", hex::encode(u256_to_h256(log.key).0)); let row = 
format!( @@ -107,17 +105,13 @@ impl StorageLogsDedupDal<'_, '_> { pub fn insert_initial_writes( &mut self, l1_batch_number: L1BatchNumber, - write_logs: &[StorageLogQuery], + write_logs: &[LogQuery], ) { async_std::task::block_on(async { let hashed_keys: Vec<_> = write_logs .iter() .map(|log| { - StorageKey::raw_hashed_key( - &log.log_query.address, - &u256_to_h256(log.log_query.key), - ) - .to_vec() + StorageKey::raw_hashed_key(&log.address, &u256_to_h256(log.key)).to_vec() }) .collect(); @@ -310,50 +304,4 @@ impl StorageLogsDedupDal<'_, '_> { .collect() }) } - - pub fn migrate_protective_reads( - &mut self, - from_l1_batch_number: L1BatchNumber, - to_l1_batch_number: L1BatchNumber, - ) { - async_std::task::block_on(async { - sqlx::query!( - "INSERT INTO protective_reads (l1_batch_number, address, key, created_at, updated_at) - SELECT storage_logs_dedup.l1_batch_number, storage_logs_dedup.address, storage_logs_dedup.key, now(), now() - FROM storage_logs_dedup - WHERE l1_batch_number BETWEEN $1 AND $2 - AND is_write = FALSE - ON CONFLICT DO NOTHING - ", - from_l1_batch_number.0 as i64, - to_l1_batch_number.0 as i64, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - - pub fn migrate_initial_writes( - &mut self, - from_l1_batch_number: L1BatchNumber, - to_l1_batch_number: L1BatchNumber, - ) { - async_std::task::block_on(async { - sqlx::query!( - "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at) - SELECT storage_logs_dedup.hashed_key, storage_logs_dedup.l1_batch_number, now(), now() - FROM storage_logs_dedup - WHERE l1_batch_number BETWEEN $1 AND $2 - AND is_write = TRUE - ON CONFLICT DO NOTHING - ", - from_l1_batch_number.0 as i64, - to_l1_batch_number.0 as i64, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 72c515e777e1..6f9947f82c71 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ 
-3,6 +3,7 @@ use std::time::Duration; use db_test_macro::db_test; use zksync_types::block::{L1BatchHeader, MiniblockHeader}; use zksync_types::proofs::AggregationRound; +use zksync_types::MAX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ fee::{Fee, TransactionExecutionMetrics}, helpers::unix_timestamp_ms, @@ -12,7 +13,6 @@ use zksync_types::{ Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, PriorityOpId, H160, H256, U256, }; -use zksync_types::{FAIR_L2_GAS_PRICE, MAX_GAS_PER_PUBDATA_BYTE}; use crate::blocks_dal::BlocksDal; use crate::prover_dal::{GetProverJobsParams, ProverDal}; @@ -29,7 +29,7 @@ const DEFAULT_GAS_PER_PUBDATA: u32 = 100; fn mock_l2_transaction() -> L2Tx { let fee = Fee { gas_limit: U256::from(1_000_000u32), - max_fee_per_gas: FAIR_L2_GAS_PRICE.into(), + max_fee_per_gas: U256::from(250_000_000u32), max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), }; @@ -60,6 +60,7 @@ fn mock_l1_execute() -> L1Tx { layer_2_tip_fee: U256::zero(), full_fee: U256::zero(), gas_limit: U256::from(100_100), + max_fee_per_gas: U256::from(1u32), gas_per_pubdata_limit: MAX_GAS_PER_PUBDATA_BYTE.into(), op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, @@ -160,6 +161,7 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { base_fee_per_gas: Default::default(), l1_gas_price: 0, l2_fair_gas_price: 0, + base_system_contracts_hashes: Default::default(), }); transactions_dal.mark_txs_as_executed_in_miniblock( MiniblockNumber(1), @@ -170,6 +172,7 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, + compressed_bytecodes: vec![], }], U256::from(1), ); @@ -199,7 +202,12 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { let storage = &mut 
connection_pool.access_test_storage().await; let block_number = 1; - let header = L1BatchHeader::mock(L1BatchNumber(block_number)); + let header = L1BatchHeader::new( + L1BatchNumber(block_number), + 0, + Default::default(), + Default::default(), + ); storage .blocks_dal() .insert_l1_batch(header, Default::default()); diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index a9be90932052..8295709940c8 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -28,6 +28,7 @@ pub enum L2TxSubmissionResult { Replaced, AlreadyExecuted, Duplicate, + Proxied, } impl fmt::Display for L2TxSubmissionResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -49,6 +50,7 @@ impl TransactionsDal<'_, '_> { let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())); let gas_limit = u256_to_big_decimal(tx.common_data.gas_limit); + let max_fee_per_gas = u256_to_big_decimal(tx.common_data.max_fee_per_gas); let full_fee = u256_to_big_decimal(tx.common_data.full_fee); let layer_2_tip_fee = u256_to_big_decimal(tx.common_data.layer_2_tip_fee); let sender = tx.common_data.sender.0.to_vec(); @@ -73,6 +75,7 @@ impl TransactionsDal<'_, '_> { initiator_address, gas_limit, + max_fee_per_gas, gas_per_pubdata_limit, data, @@ -97,12 +100,13 @@ impl TransactionsDal<'_, '_> { VALUES ( $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, - $13, $14, $15, $16, $17, now(), now() + $13, $14, $15, $16, $17, $18, now(), now() ) ", tx_hash, sender, gas_limit, + max_fee_per_gas, gas_per_pubdata_limit, json_data, serial_id, @@ -318,30 +322,6 @@ impl TransactionsDal<'_, '_> { }) } - pub fn set_correct_tx_type_for_priority_operations(&mut self, limit: u32) -> bool { - async_std::task::block_on(async { - sqlx::query!( - r#" - UPDATE transactions - SET tx_format=255 - WHERE hash IN ( - SELECT hash - FROM transactions - WHERE is_priority = true - AND tx_format 
is null - LIMIT $1 - ) - RETURNING tx_format - "#, - limit as i32 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .is_some() - }) - } - pub fn mark_txs_as_executed_in_miniblock( &mut self, miniblock_number: MiniblockNumber, @@ -353,8 +333,13 @@ impl TransactionsDal<'_, '_> { let mut l1_indices_in_block = Vec::with_capacity(transactions.len()); let mut l1_errors = Vec::with_capacity(transactions.len()); let mut l1_execution_infos = Vec::with_capacity(transactions.len()); + let mut l1_refunded_gas = Vec::with_capacity(transactions.len()); let mut l2_hashes = Vec::with_capacity(transactions.len()); + let mut l2_values = Vec::with_capacity(transactions.len()); + let mut l2_contract_addresses = Vec::with_capacity(transactions.len()); + let mut l2_paymaster = Vec::with_capacity(transactions.len()); + let mut l2_paymaster_input = Vec::with_capacity(transactions.len()); let mut l2_indices_in_block = Vec::with_capacity(transactions.len()); let mut l2_initiators = Vec::with_capacity(transactions.len()); let mut l2_nonces = Vec::with_capacity(transactions.len()); @@ -399,9 +384,17 @@ impl TransactionsDal<'_, '_> { l1_indices_in_block.push(index_in_block as i32); l1_errors.push(error.unwrap_or_default()); l1_execution_infos.push(serde_json::to_value(execution_info).unwrap()); + l1_refunded_gas.push(*refunded_gas as i64); } ExecuteTransactionCommon::L2(common_data) => { let data = serde_json::to_value(&transaction.execute).unwrap(); + l2_values.push(u256_to_big_decimal(transaction.execute.value)); + l2_contract_addresses + .push(transaction.execute.contract_address.as_bytes().to_vec()); + l2_paymaster_input + .push(common_data.paymaster_params.paymaster_input.clone()); + l2_paymaster + .push(common_data.paymaster_params.paymaster.as_bytes().to_vec()); l2_hashes.push(hash.0.to_vec()); l2_indices_in_block.push(index_in_block as i32); l2_initiators.push(transaction.initiator_account().0.to_vec()); @@ -450,12 +443,16 @@ impl TransactionsDal<'_, '_> { input = 
data_table.input, data = data_table.data, tx_format = data_table.tx_format, - miniblock_number = $17, + miniblock_number = $21, index_in_block = data_table.index_in_block, error = NULLIF(data_table.error, ''), effective_gas_price = data_table.effective_gas_price, execution_info = data_table.new_execution_info, refunded_gas = data_table.refunded_gas, + value = data_table.value, + contract_address = data_table.contract_address, + paymaster = data_table.paymaster, + paymaster_input = data_table.paymaster_input, in_mempool = FALSE, updated_at = now() FROM @@ -476,7 +473,11 @@ impl TransactionsDal<'_, '_> { UNNEST($13::jsonb[]) AS new_execution_info, UNNEST($14::bytea[]) AS input, UNNEST($15::jsonb[]) AS data, - UNNEST($16::bigint[]) as refunded_gas + UNNEST($16::bigint[]) as refunded_gas, + UNNEST($17::numeric[]) as value, + UNNEST($18::bytea[]) as contract_address, + UNNEST($19::bytea[]) as paymaster, + UNNEST($20::bytea[]) as paymaster_input ) AS data_table WHERE transactions.initiator_address=data_table.initiator_address AND transactions.nonce=data_table.nonce @@ -497,6 +498,10 @@ impl TransactionsDal<'_, '_> { &l2_inputs, &l2_datas, &l2_refunded_gas, + &l2_values, + &l2_contract_addresses, + &l2_paymaster, + &l2_paymaster_input, miniblock_number.0 as i32, ) .execute(self.storage.conn()) @@ -515,6 +520,7 @@ impl TransactionsDal<'_, '_> { error = NULLIF(data_table.error, ''), in_mempool=FALSE, execution_info = execution_info || data_table.new_execution_info, + refunded_gas = data_table.refunded_gas, updated_at = now() FROM ( @@ -522,7 +528,8 @@ impl TransactionsDal<'_, '_> { UNNEST($2::bytea[]) AS hash, UNNEST($3::integer[]) AS index_in_block, UNNEST($4::varchar[]) AS error, - UNNEST($5::jsonb[]) AS new_execution_info + UNNEST($5::jsonb[]) AS new_execution_info, + UNNEST($6::bigint[]) as refunded_gas ) AS data_table WHERE transactions.hash = data_table.hash "#, @@ -530,7 +537,8 @@ impl TransactionsDal<'_, '_> { &l1_hashes, &l1_indices_in_block, &l1_errors, - 
&l1_execution_infos + &l1_execution_infos, + &l1_refunded_gas ) .execute(self.storage.conn()) .await diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 904750c508e8..d8a22bbf9c41 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -5,8 +5,8 @@ use zksync_types::{ BlockId, BlockNumber, L2ToL1Log, Log, Transaction, TransactionDetails, TransactionId, TransactionReceipt, }, - Address, L2ChainId, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, - H160, H256, U256, U64, + Address, L2ChainId, MiniblockNumber, ACCOUNT_CODE_STORAGE_ADDRESS, + FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, U256, U64, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; @@ -14,7 +14,8 @@ use crate::models::{ storage_block::{bind_block_where_sql_params, web3_block_where_sql}, storage_event::{StorageL2ToL1Log, StorageWeb3Log}, storage_transaction::{ - extract_web3_transaction, web3_transaction_select_sql, StorageTransactionDetails, + extract_web3_transaction, web3_transaction_select_sql, StorageTransaction, + StorageTransactionDetails, }, }; use crate::SqlxError; @@ -345,4 +346,29 @@ impl TransactionsWeb3Dal<'_, '_> { Ok(U256::from(pending_nonce)) }) } + + /// Returns the server transactions (not API ones) from a certain miniblock. + /// Returns an empty list if the miniblock doesn't exist. + pub fn get_raw_miniblock_transactions( + &mut self, + miniblock: MiniblockNumber, + ) -> Result, SqlxError> { + async_std::task::block_on(async { + let txs = sqlx::query_as!( + StorageTransaction, + " + SELECT * FROM transactions + WHERE miniblock_number = $1 + ORDER BY index_in_block + ", + miniblock.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await? 
+ .into_iter() + .map(zksync_types::Transaction::from) + .collect(); + Ok(txs) + }) + } } diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index ad95917dd7dd..61b4cecce7a6 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -34,6 +34,7 @@ impl WitnessGeneratorDal<'_, '_> { &mut self, processing_timeout: Duration, max_attempts: u32, + last_l1_batch_to_process: u32, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -45,9 +46,12 @@ impl WitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = ( SELECT l1_batch_number FROM witness_inputs - WHERE status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) + WHERE l1_batch_number <= $3 + AND + ( status = 'queued' + OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) + OR (status = 'failed' AND attempts < $2) + ) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -56,7 +60,8 @@ impl WitnessGeneratorDal<'_, '_> { RETURNING witness_inputs.* ", &processing_timeout, - max_attempts as i32 + max_attempts as i32, + last_l1_batch_to_process as i64 ) .fetch_optional(self.storage.conn()) .await @@ -70,10 +75,46 @@ impl WitnessGeneratorDal<'_, '_> { }) } + pub fn get_witness_generated_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> { + [ + "node_aggregation_witness_jobs", + "leaf_aggregation_witness_jobs", + "scheduler_witness_jobs", + "witness_inputs", + ] + .map(|round| { + async_std::task::block_on(async { + let record = sqlx::query(&format!( + "SELECT MAX(l1_batch_number) as l1_batch FROM {} WHERE status='successful'", + round + )) + .fetch_one(self.storage.conn()) + .await + .unwrap(); + ( + L1BatchNumber( + record + .get::, &str>("l1_batch") + .unwrap_or_default() as u32, + ), + match round { + "node_aggregation_witness_jobs" => 
AggregationRound::NodeAggregation, + "leaf_aggregation_witness_jobs" => AggregationRound::LeafAggregation, + "scheduler_witness_jobs" => AggregationRound::Scheduler, + "witness_inputs" => AggregationRound::BasicCircuits, + _ => unreachable!(), + }, + ) + }) + }) + .to_vec() + } + pub fn get_next_leaf_aggregation_witness_job( &mut self, processing_timeout: Duration, max_attempts: u32, + last_l1_batch_to_process: u32, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -85,9 +126,12 @@ impl WitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = ( SELECT l1_batch_number FROM leaf_aggregation_witness_jobs - WHERE status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) + WHERE l1_batch_number <= $3 + AND + ( status = 'queued' + OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) + OR (status = 'failed' AND attempts < $2) + ) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -95,7 +139,8 @@ impl WitnessGeneratorDal<'_, '_> { ) RETURNING leaf_aggregation_witness_jobs.* ", &processing_timeout, - max_attempts as i32 + max_attempts as i32, + last_l1_batch_to_process as i64 ) .fetch_optional(self.storage.conn()) .await @@ -133,6 +178,7 @@ impl WitnessGeneratorDal<'_, '_> { &mut self, processing_timeout: Duration, max_attempts: u32, + last_l1_batch_to_process: u32, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -144,9 +190,12 @@ impl WitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = ( SELECT l1_batch_number FROM node_aggregation_witness_jobs - WHERE status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) + WHERE l1_batch_number <= $3 + AND + ( status = 'queued' + OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) 
+ OR (status = 'failed' AND attempts < $2) + ) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -154,7 +203,8 @@ impl WitnessGeneratorDal<'_, '_> { ) RETURNING node_aggregation_witness_jobs.* ", &processing_timeout, - max_attempts as i32 + max_attempts as i32, + last_l1_batch_to_process as i64, ) .fetch_optional(self.storage.conn()) .await @@ -189,6 +239,7 @@ impl WitnessGeneratorDal<'_, '_> { &mut self, processing_timeout: Duration, max_attempts: u32, + last_l1_batch_to_process: u32, ) -> Option { async_std::task::block_on(async { let processing_timeout = pg_interval_from_duration(processing_timeout); @@ -200,9 +251,12 @@ impl WitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = ( SELECT l1_batch_number FROM scheduler_witness_jobs - WHERE status = 'queued' - OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) - OR (status = 'failed' AND attempts < $2) + WHERE l1_batch_number <= $3 + AND + ( status = 'queued' + OR (status = 'in_progress' AND processing_started_at < now() - $1::interval) + OR (status = 'failed' AND attempts < $2) + ) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -210,7 +264,8 @@ impl WitnessGeneratorDal<'_, '_> { ) RETURNING scheduler_witness_jobs.* ", &processing_timeout, - max_attempts as i32 + max_attempts as i32, + last_l1_batch_to_process as i64 ) .fetch_optional(self.storage.conn()) .await @@ -458,7 +513,9 @@ impl WitnessGeneratorDal<'_, '_> { /// Saves artifacts in node_aggregation_job /// and advances it to `waiting_for_proofs` status - /// it will be advanced to `queued` by the prover when all the dependency proofs are computed + /// it will be advanced to `queued` by the prover when all the dependency proofs are computed. + /// If the node aggregation job was already `queued` in case of connrecunt run of same leaf aggregation job + /// we keep the status as is to prevent data race. 
pub fn save_leaf_aggregation_artifacts( &mut self, block_number: L1BatchNumber, @@ -474,7 +531,7 @@ impl WitnessGeneratorDal<'_, '_> { aggregation_outputs_blob_url = $4, status = 'waiting_for_proofs', updated_at = now() - WHERE l1_batch_number = $2 + WHERE l1_batch_number = $2 AND status != 'queued' ", number_of_leaf_circuits as i64, block_number.0 as i64, @@ -491,16 +548,19 @@ impl WitnessGeneratorDal<'_, '_> { /// Saves artifacts in scheduler_artifacts_jobs` /// and advances it to `waiting_for_proofs` status - /// it will be advanced to `queued` by the prover when all the dependency proofs are computed + /// it will be advanced to `queued` by the prover when all the dependency proofs are computed. + /// If the scheduler witness job was already `queued` in case of connrecunt run of same node aggregation job + /// we keep the status as is to prevent data race. pub fn save_node_aggregation_artifacts(&mut self, block_number: L1BatchNumber) { async_std::task::block_on(async { let started_at = Instant::now(); sqlx::query!( " UPDATE scheduler_witness_jobs - SET final_node_aggregations_blob_url = $2, status = 'waiting_for_proofs', - updated_at = now() - WHERE l1_batch_number = $1 + SET final_node_aggregations_blob_url = $2, + status = 'waiting_for_proofs', + updated_at = now() + WHERE l1_batch_number = $1 AND status != 'queued' ", block_number.0 as i64, final_node_aggregations_blob_url(block_number), @@ -676,30 +736,6 @@ impl WitnessGeneratorDal<'_, '_> { .collect()) } - pub fn get_leaf_aggregation_l1_batches_with_blobs_in_db( - &mut self, - limit: u8, - ) -> Vec { - async_std::task::block_on(async { - let l1_batches = sqlx::query!( - r#" - SELECT l1_batch_number FROM leaf_aggregation_witness_jobs - WHERE length(basic_circuits) <> 0 - OR length(basic_circuits_inputs) <> 0 - LIMIT $1; - "#, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)) - .collect() - }) - } - 
pub fn save_witness_inputs(&mut self, block_number: L1BatchNumber) { async_std::task::block_on(async { sqlx::query!( @@ -716,112 +752,6 @@ impl WitnessGeneratorDal<'_, '_> { }) } - pub fn purge_leaf_aggregation_blobs_from_db(&mut self, l1_batches: Vec) { - let l1_batches: Vec = l1_batches - .iter() - .map(|l1_batch| l1_batch.0 as i64) - .collect(); - async_std::task::block_on(async { - sqlx::query!( - r#" - UPDATE leaf_aggregation_witness_jobs - SET basic_circuits='', - basic_circuits_inputs='' - WHERE l1_batch_number = ANY($1); - "#, - &l1_batches[..] - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - - pub fn get_node_aggregation_l1_batches_with_blobs_in_db( - &mut self, - limit: u8, - ) -> Vec { - async_std::task::block_on(async { - let l1_batches = sqlx::query!( - r#" - SELECT l1_batch_number FROM node_aggregation_witness_jobs - WHERE length(leaf_layer_subqueues) <> 0 - OR length(aggregation_outputs) <> 0 - LIMIT $1; - "#, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)) - .collect() - }) - } - - pub fn purge_node_aggregation_blobs_from_db(&mut self, l1_batches: Vec) { - let l1_batches: Vec = l1_batches - .iter() - .map(|l1_batch| l1_batch.0 as i64) - .collect(); - async_std::task::block_on(async { - sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs - SET leaf_layer_subqueues='', - aggregation_outputs='' - WHERE l1_batch_number = ANY($1); - "#, - &l1_batches[..] 
- ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - - pub fn get_scheduler_l1_batches_with_blobs_in_db(&mut self, limit: u8) -> Vec { - async_std::task::block_on(async { - let l1_batches = sqlx::query!( - r#" - SELECT l1_batch_number FROM scheduler_witness_jobs - WHERE length(final_node_aggregations) <> 0 - LIMIT $1; - "#, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)) - .collect() - }) - } - - pub fn purge_scheduler_blobs_from_db(&mut self, l1_batches: Vec) { - let l1_batches: Vec = l1_batches - .iter() - .map(|l1_batch| l1_batch.0 as i64) - .collect(); - async_std::task::block_on(async { - sqlx::query!( - r#" - UPDATE scheduler_witness_jobs - SET final_node_aggregations='' - WHERE l1_batch_number = ANY($1); - "#, - &l1_batches[..] - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) - } - pub fn get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned( &mut self, limit: u8, @@ -833,7 +763,7 @@ impl WitnessGeneratorDal<'_, '_> { WHERE status='successful' AND is_blob_cleaned=FALSE AND basic_circuits_blob_url is NOT NULL AND basic_circuits_inputs_blob_url is NOT NULL - AND updated_at < NOW() - INTERVAL '2 days' + AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1; "#, limit as i32 @@ -867,7 +797,7 @@ impl WitnessGeneratorDal<'_, '_> { WHERE status='successful' AND is_blob_cleaned=FALSE AND leaf_layer_subqueues_blob_url is NOT NULL AND aggregation_outputs_blob_url is NOT NULL - AND updated_at < NOW() - INTERVAL '2 days' + AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1; "#, limit as i32 @@ -899,7 +829,7 @@ impl WitnessGeneratorDal<'_, '_> { r#" SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs WHERE status='successful' AND is_blob_cleaned=FALSE - AND updated_at < NOW() - INTERVAL '2 days' + AND updated_at < NOW() - INTERVAL '30 days' AND 
scheduler_witness_blob_url is NOT NULL AND final_node_aggregations_blob_url is NOT NULL LIMIT $1; diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index b8f174cbf450..ddb5ae6bc323 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -392,6 +392,7 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { layer_2_tip_fee: U256::zero(), full_fee: U256::zero(), gas_limit: U256::zero(), + max_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::one(), op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, diff --git a/core/lib/merkle_tree/src/zksync_tree.rs b/core/lib/merkle_tree/src/zksync_tree.rs index 2cc4a80f95d3..72aea1ab366e 100644 --- a/core/lib/merkle_tree/src/zksync_tree.rs +++ b/core/lib/merkle_tree/src/zksync_tree.rs @@ -151,6 +151,7 @@ impl ZkSyncTree { .flat_map(|(i, logs)| logs.into_iter().map(move |log| (i, log))) .collect(); + let total_logs = storage_logs_with_blocks.len(); let mut leaf_indices = self .storage .process_leaf_indices(&storage_logs_with_blocks)?; @@ -163,17 +164,20 @@ impl ZkSyncTree { }) .collect(); - metrics::histogram!("merkle_tree.leaf_index_update", start.elapsed()); - let start = Instant::now(); + metrics::histogram!("merkle_tree.stage", start.elapsed(), "stage" => "leaf_index_update"); + metrics::histogram!("merkle_tree.average.stage", start.elapsed().div_f64(total_logs as f64), "stage" => "leaf_index_update"); + let start = Instant::now(); let prepared_updates = self.prepare_batch_update(storage_logs_with_indices)?; - metrics::histogram!("merkle_tree.prepare_update", start.elapsed()); + metrics::histogram!("merkle_tree.stage", start.elapsed(), "stage" => "read_hashes"); + metrics::histogram!("merkle_tree.average.stage", start.elapsed().div_f64(total_logs as f64), "stage" => "read_hashes"); let start = Instant::now(); let updates = prepared_updates.calculate(self.hasher().clone())?; - metrics::histogram!("merkle_tree.root_calculation", 
start.elapsed()); + metrics::histogram!("merkle_tree.stage", start.elapsed(), "stage" => "calculate"); + metrics::histogram!("merkle_tree.average.stage", start.elapsed().div_f64(total_logs as f64), "stage" => "calculate"); let start = Instant::now(); @@ -237,7 +241,8 @@ impl ZkSyncTree { } }; - metrics::histogram!("merkle_tree.patch_application", start.elapsed()); + metrics::histogram!("merkle_tree.stage", start.elapsed(), "stage" => "apply_patch"); + metrics::histogram!("merkle_tree.average.stage", start.elapsed().div_f64(total_logs as f64), "stage" => "apply_patch"); self.block_number += total_blocks as u32; Ok(tree_metadata) diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index deccb8bd0996..d8cf4481064d 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -10,13 +10,15 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -cloud-storage = "0.11.1" +google-cloud-storage = "0.9.0" +google-cloud-auth = "0.9.0" +google-cloud-default = { version = "0.1.0", features = ["storage", "google-cloud-metadata"] } vlog = { path = "../vlog", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_types = { path = "../types", version = "1.0" } metrics = "0.20" tokio = { version = "1.21.2", features = ["full"] } +http = "0.2.9" [dev-dependencies] -expanduser = "1.2.2" tempdir = "0.3.7" diff --git a/core/lib/object_store/src/file_backed_object_store.rs b/core/lib/object_store/src/file_backed_object_store.rs index 06918a8e3492..d864cad68dc7 100644 --- a/core/lib/object_store/src/file_backed_object_store.rs +++ b/core/lib/object_store/src/file_backed_object_store.rs @@ -1,15 +1,23 @@ -use std::error::Error; use std::fmt::Debug; use std::fs; use std::fs::File; -use std::io::{Read, Write}; +use std::io::{ErrorKind, Read, Write}; use crate::object_store::{ - ObjectStore, LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, + ObjectStore, ObjectStoreError, 
LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH, PROVER_JOBS_BUCKET_PATH, SCHEDULER_WITNESS_JOBS_BUCKET_PATH, WITNESS_INPUT_BUCKET_PATH, }; +impl From for ObjectStoreError { + fn from(err: std::io::Error) -> Self { + match err.kind() { + ErrorKind::NotFound => ObjectStoreError::KeyNotFound(err.to_string()), + _ => ObjectStoreError::Other(err.to_string()), + } + } +} + #[derive(Debug)] pub struct FileBackedObjectStore { base_dir: String, @@ -38,13 +46,12 @@ impl ObjectStore for FileBackedObjectStore { type Bucket = &'static str; type Key = String; type Value = Vec; - type Error = Box; fn get_store_type(&self) -> &'static str { "FileBackedStore" } - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { + fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { let filename = self.filename(bucket, key); let mut file = File::open(filename)?; let mut buffer = Vec::::new(); @@ -57,14 +64,14 @@ impl ObjectStore for FileBackedObjectStore { bucket: Self::Bucket, key: Self::Key, value: Self::Value, - ) -> Result<(), Self::Error> { + ) -> Result<(), ObjectStoreError> { let filename = self.filename(bucket, key); let mut file = File::create(filename)?; file.write_all(&value)?; Ok(()) } - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), Self::Error> { + fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), ObjectStoreError> { let filename = self.filename(bucket, key); fs::remove_file(filename)?; Ok(()) @@ -73,9 +80,10 @@ impl ObjectStore for FileBackedObjectStore { #[cfg(test)] mod test { - use super::*; use tempdir::TempDir; + use super::*; + #[test] fn test_get() { let dir = TempDir::new("test-data").unwrap(); diff --git a/core/lib/object_store/src/gcs_object_store.rs b/core/lib/object_store/src/gcs_object_store.rs index 1464d581ba13..3d690a7d1fed 100644 --- a/core/lib/object_store/src/gcs_object_store.rs +++ b/core/lib/object_store/src/gcs_object_store.rs @@ -1,29 +1,68 @@ -pub 
use cloud_storage; -use cloud_storage::Client; -use std::env; -use std::error::Error; +use std::fmt; use std::sync::mpsc::channel; use std::time::Instant; + +use google_cloud_default::WithAuthExt; +use google_cloud_storage::client::{Client, ClientConfig}; +use google_cloud_storage::http::{ + objects::{ + delete::DeleteObjectRequest, + download::Range, + get::GetObjectRequest, + upload::{Media, UploadObjectRequest, UploadType}, + }, + Error::{self, HttpClient}, +}; +use http::StatusCode; use tokio; use zksync_config::ObjectStoreConfig; -use crate::object_store::ObjectStore; +use crate::object_store::{ObjectStore, ObjectStoreError}; -#[derive(Debug)] pub struct GoogleCloudStorage { client: Client, bucket_prefix: String, } + +// we need to implement custom Debug for GoogleCloudStorage because +// `google_cloud_storage::client::Client` type does not implements debug. +impl fmt::Debug for GoogleCloudStorage { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GoogleCloudStorage") + .field("bucket_prefix", &self.bucket_prefix) + .finish() + } +} + +impl From for ObjectStoreError { + fn from(error: Error) -> Self { + match error { + HttpClient(reqwest_error) => { + if let Some(status) = reqwest_error.status() { + match status { + StatusCode::NOT_FOUND => { + ObjectStoreError::KeyNotFound(reqwest_error.to_string()) + } + _ => ObjectStoreError::Other(reqwest_error.to_string()), + } + } else { + ObjectStoreError::Other(reqwest_error.to_string()) + } + } + _ => ObjectStoreError::Other(error.to_string()), + } + } +} + pub const GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE: &str = "GoogleCloudStorage"; impl GoogleCloudStorage { - pub fn new() -> Self { - let config = ObjectStoreConfig::from_env(); - env::set_var("SERVICE_ACCOUNT", config.service_account_path); + pub fn new(client: Client) -> Self { + let object_store_config = ObjectStoreConfig::from_env(); GoogleCloudStorage { - client: Client::new(), - bucket_prefix: 
ObjectStoreConfig::from_env().bucket_base_url, + client, + bucket_prefix: object_store_config.bucket_base_url, } } @@ -35,7 +74,7 @@ impl GoogleCloudStorage { self, bucket: &'static str, key: String, - ) -> Result, cloud_storage::Error> { + ) -> Result, ObjectStoreError> { let started_at = Instant::now(); vlog::info!( "Fetching data from GCS for key {} from bucket {}", @@ -44,8 +83,15 @@ impl GoogleCloudStorage { ); let blob = self .client - .object() - .download(&self.bucket_prefix, &self.filename(bucket, &key)) + .download_object( + &GetObjectRequest { + bucket: self.bucket_prefix.clone(), + object: self.filename(bucket, &key), + ..Default::default() + }, + &Range::default(), + None, + ) .await; vlog::info!( "Fetched data from GCS for key {} from bucket {} and it took: {:?}", @@ -58,7 +104,7 @@ impl GoogleCloudStorage { started_at.elapsed(), "bucket" => bucket ); - blob + blob.map_err(ObjectStoreError::from) } async fn put_async( @@ -66,21 +112,24 @@ impl GoogleCloudStorage { bucket: &'static str, key: String, value: Vec, - ) -> Result<(), cloud_storage::Error> { + ) -> Result<(), ObjectStoreError> { let started_at = Instant::now(); vlog::info!( "Storing data to GCS for key {} from bucket {}", &self.filename(bucket, &key), self.bucket_prefix ); + let upload_type = UploadType::Simple(Media::new(self.filename(bucket, &key))); let object = self .client - .object() - .create( - &self.bucket_prefix, + .upload_object( + &UploadObjectRequest { + bucket: self.bucket_prefix.clone(), + ..Default::default() + }, value, - &self.filename(bucket, &key), - "binary/blob", + &upload_type, + None, ) .await; vlog::info!( @@ -94,29 +143,26 @@ impl GoogleCloudStorage { started_at.elapsed(), "bucket" => bucket ); - object.map(drop) + object.map(drop).map_err(ObjectStoreError::from) } - async fn remove_async( - self, - bucket: &'static str, - key: String, - ) -> Result<(), cloud_storage::Error> { + async fn remove_async(self, bucket: &'static str, key: String) -> Result<(), 
ObjectStoreError> { vlog::info!( "Removing data from GCS for key {} from bucket {}", &self.filename(bucket, &key), self.bucket_prefix ); self.client - .object() - .delete(&self.bucket_prefix, &self.filename(bucket, &key)) + .delete_object( + &DeleteObjectRequest { + bucket: self.bucket_prefix.clone(), + object: self.filename(bucket, &key), + ..Default::default() + }, + None, + ) .await - } -} - -impl Default for GoogleCloudStorage { - fn default() -> Self { - Self::new() + .map_err(ObjectStoreError::from) } } @@ -133,8 +179,11 @@ where .enable_time() .build() .unwrap(); - let gcs = GoogleCloudStorage::new(); - let result = runtime.block_on(Box::pin(query(gcs))); + let result = runtime.block_on(async move { + let gcs_config = ClientConfig::default().with_auth().await.unwrap(); + let gcs = GoogleCloudStorage::new(Client::new(gcs_config)); + query(gcs).await + }); tx.send(result).unwrap(); }); rx.recv().unwrap() @@ -144,14 +193,13 @@ impl ObjectStore for GoogleCloudStorage { type Bucket = &'static str; type Key = String; type Value = Vec; - type Error = Box; fn get_store_type(&self) -> &'static str { GOOGLE_CLOUD_STORAGE_OBJECT_STORE_TYPE } - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { - gcs_query(move |gcs| gcs.get_async(bucket, key)).map_err(|e| e.into()) + fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result { + gcs_query(move |gcs| gcs.get_async(bucket, key)) } fn put( @@ -159,11 +207,11 @@ impl ObjectStore for GoogleCloudStorage { bucket: Self::Bucket, key: Self::Key, value: Self::Value, - ) -> Result<(), Self::Error> { - gcs_query(move |gcs| gcs.put_async(bucket, key, value)).map_err(|e| e.into()) + ) -> Result<(), ObjectStoreError> { + gcs_query(move |gcs| gcs.put_async(bucket, key, value)) } - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), Self::Error> { - gcs_query(move |gcs| gcs.remove_async(bucket, key)).map_err(|e| e.into()) + fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), 
ObjectStoreError> { + gcs_query(move |gcs| gcs.remove_async(bucket, key)) } } diff --git a/core/lib/object_store/src/lib.rs b/core/lib/object_store/src/lib.rs index d31ef4bbc383..15b71013d1db 100644 --- a/core/lib/object_store/src/lib.rs +++ b/core/lib/object_store/src/lib.rs @@ -3,7 +3,6 @@ extern crate core; pub mod file_backed_object_store; pub mod gcs_object_store; pub mod object_store; -pub use cloud_storage; pub mod gcs_utils; #[cfg(test)] diff --git a/core/lib/object_store/src/object_store.rs b/core/lib/object_store/src/object_store.rs index fdce540fc363..2ff4c80057f0 100644 --- a/core/lib/object_store/src/object_store.rs +++ b/core/lib/object_store/src/object_store.rs @@ -1,8 +1,11 @@ -use std::error; -use std::fmt::Debug; +use google_cloud_default::WithAuthExt; +use google_cloud_storage::client::{Client, ClientConfig}; +use std::fmt::{Debug, Display, Formatter}; use std::str::FromStr; +use std::sync::mpsc::channel; +use std::{error, thread}; +use tokio::runtime::Builder; -pub use cloud_storage::Error; use zksync_config::ObjectStoreConfig; use crate::file_backed_object_store::FileBackedObjectStore; @@ -14,17 +17,33 @@ pub const LEAF_AGGREGATION_WITNESS_JOBS_BUCKET_PATH: &str = "leaf_aggregation_wi pub const NODE_AGGREGATION_WITNESS_JOBS_BUCKET_PATH: &str = "node_aggregation_witness_jobs"; pub const SCHEDULER_WITNESS_JOBS_BUCKET_PATH: &str = "scheduler_witness_jobs"; +#[derive(Debug)] +pub enum ObjectStoreError { + KeyNotFound(String), + Other(String), +} + +impl Display for ObjectStoreError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ObjectStoreError::KeyNotFound(e) => write!(f, "Key Notfound error: {}", e), + ObjectStoreError::Other(s) => write!(f, "Other error: {}", s), + } + } +} + +impl error::Error for ObjectStoreError {} + /// Trait to fetch and store BLOB's from an object store(S3, Google Cloud Storage, Azure Blobstore etc). 
pub trait ObjectStore: Debug + Send + Sync { type Bucket: Debug; type Key: Debug; type Value; - type Error; fn get_store_type(&self) -> &'static str; /// Fetches the value for the given key from the given bucket if it exists otherwise returns Error. - fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result; + fn get(&self, bucket: Self::Bucket, key: Self::Key) -> Result; /// Stores the value associating it with the key into the given bucket, if the key already exist then the value is replaced. fn put( @@ -32,20 +51,14 @@ pub trait ObjectStore: Debug + Send + Sync { bucket: Self::Bucket, key: Self::Key, value: Self::Value, - ) -> Result<(), Self::Error>; + ) -> Result<(), ObjectStoreError>; /// Removes the value associated with the key from the given bucket if it exist. - fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), Self::Error>; + fn remove(&mut self, bucket: Self::Bucket, key: Self::Key) -> Result<(), ObjectStoreError>; } -pub type DynamicObjectStore = Box< - dyn ObjectStore< - Bucket = &'static str, - Error = Box, - Key = String, - Value = Vec, - >, ->; +pub type DynamicObjectStore = + Box>>; #[derive(Debug, Eq, PartialEq)] pub enum ObjectStoreMode { @@ -72,7 +85,8 @@ pub fn create_object_store( match mode { ObjectStoreMode::GCS => { vlog::trace!("Initialized GoogleCloudStorage Object store"); - Box::new(GoogleCloudStorage::new()) + let gcs_config = fetch_gcs_config(); + Box::new(GoogleCloudStorage::new(Client::new(gcs_config))) } ObjectStoreMode::FileBacked => { vlog::trace!("Initialized FileBacked Object store"); @@ -86,3 +100,19 @@ pub fn create_object_store_from_env() -> DynamicObjectStore { let mode = ObjectStoreMode::from_str(&config.mode).unwrap(); create_object_store(mode, config.file_backed_base_path) } + +fn fetch_gcs_config() -> ClientConfig { + let (tx, rx) = channel(); + thread::spawn(move || { + let runtime = Builder::new_current_thread() + .enable_io() + .enable_time() + .build() + .unwrap(); + let result = runtime 
+ .block_on(ClientConfig::default().with_auth()) + .expect("Failed build GCS client config"); + tx.send(result).unwrap(); + }); + rx.recv().unwrap() +} diff --git a/core/lib/object_store/src/tests.rs b/core/lib/object_store/src/tests.rs index 86faff232af6..1b590a64d92d 100644 --- a/core/lib/object_store/src/tests.rs +++ b/core/lib/object_store/src/tests.rs @@ -1,5 +1,4 @@ use crate::object_store::{create_object_store, ObjectStoreMode}; -use expanduser::expanduser; use std::env; #[test] @@ -16,9 +15,7 @@ fn test_object_store_gcs_creation() { } fn set_object_store_environment_variable() { - let path = expanduser("~/gcloud/service_account.json").unwrap(); - env::set_var("OBJECT_STORE_SERVICE_ACCOUNT_PATH", path); - env::set_var("OBJECT_STORE_BUCKET_BASE_URL", "/base/url"); + env::set_var("OBJECT_STORE_BUCKET_BASE_URL", "zksync_unit_test"); env::set_var("OBJECT_STORE_MODE", "GCS"); env::set_var("OBJECT_STORE_FILE_BACKED_BASE_PATH", "/base/url"); } diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs index a936450b6955..85f1e884268c 100644 --- a/core/lib/prometheus_exporter/src/lib.rs +++ b/core/lib/prometheus_exporter/src/lib.rs @@ -15,6 +15,22 @@ pub fn run_prometheus_exporter(config: PrometheusConfig, use_pushgateway: bool) let storage_interactions_per_call_buckets = [ 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, ]; + let vm_memory_per_call_buckets = [ + 1000.0, + 10000.0, + 100000.0, + 500000.0, + 1000000.0, + 5000000.0, + 10000000.0, + 50000000.0, + 100000000.0, + 500000000.0, + 1000000000.0, + ]; + let percents_buckets = [ + 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 120.0, + ]; let builder = if use_pushgateway { let job_id = "zksync-pushgateway"; @@ -46,6 +62,11 @@ pub fn run_prometheus_exporter(config: PrometheusConfig, use_pushgateway: bool) &storage_interactions_per_call_buckets, ) .unwrap() + .set_buckets_for_metric( + Matcher::Prefix("runtime_context.memory".to_owned()), + 
&vm_memory_per_call_buckets, + ) + .unwrap() .set_buckets_for_metric(Matcher::Prefix("server.prover".to_owned()), &prover_buckets) .unwrap() .set_buckets_for_metric( @@ -53,6 +74,8 @@ pub fn run_prometheus_exporter(config: PrometheusConfig, use_pushgateway: bool) &slow_latency_buckets, ) .unwrap() + .set_buckets_for_metric(Matcher::Prefix("vm.refund".to_owned()), &percents_buckets) + .unwrap() .build() .expect("failed to install Prometheus recorder"); diff --git a/core/lib/prover_utils/Cargo.toml b/core/lib/prover_utils/Cargo.toml index ae1ab209815d..b1c0e442bcf2 100644 --- a/core/lib/prover_utils/Cargo.toml +++ b/core/lib/prover_utils/Cargo.toml @@ -11,6 +11,8 @@ categories = ["cryptography"] [dependencies] vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_config = { path = "../../lib/config", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } metrics = "0.20" reqwest = { version = "0.11", features = ["blocking"] } diff --git a/core/lib/prover_utils/src/lib.rs b/core/lib/prover_utils/src/lib.rs index 97d1b00cc4e8..194f0fcff66d 100644 --- a/core/lib/prover_utils/src/lib.rs +++ b/core/lib/prover_utils/src/lib.rs @@ -6,6 +6,8 @@ use std::path::Path; use std::time::Duration; use std::time::Instant; +pub mod region_fetcher; + fn download_bytes(key_download_url: &str) -> reqwest::Result> { vlog::info!("Downloading initial setup from {:?}", key_download_url); @@ -87,3 +89,28 @@ pub fn numeric_index_to_circuit_name(circuit_numeric_index: u8) -> Option<&'stat _ => None, } } + +pub fn circuit_name_to_numeric_index(circuit_name: &str) -> Option { + match circuit_name { + "Scheduler" => Some(0), + "Node aggregation" => Some(1), + "Leaf aggregation" => Some(2), + "Main VM" => Some(3), + "Decommitts sorter" => Some(4), + "Code decommitter" => Some(5), + "Log demuxer" => Some(6), + "Keccak" => Some(7), + "SHA256" => Some(8), + "ECRecover" => Some(9), + "RAM permutation" => Some(10), + "Storage sorter" => Some(11), + "Storage application" => 
Some(12), + "Initial writes pubdata rehasher" => Some(13), + "Repeated writes pubdata rehasher" => Some(14), + "Events sorter" => Some(15), + "L1 messages sorter" => Some(16), + "L1 messages rehasher" => Some(17), + "L1 messages merklizer" => Some(18), + _ => None, + } +} diff --git a/core/lib/prover_utils/src/region_fetcher.rs b/core/lib/prover_utils/src/region_fetcher.rs new file mode 100644 index 000000000000..882f96e08628 --- /dev/null +++ b/core/lib/prover_utils/src/region_fetcher.rs @@ -0,0 +1,29 @@ +use reqwest::header::{HeaderMap, HeaderValue}; +use reqwest::Method; + +use zksync_config::configs::ProverGroupConfig; +use zksync_utils::http_with_retries::send_request_with_retries; + +pub async fn get_region() -> String { + let prover_group_config = ProverGroupConfig::from_env(); + let mut headers = HeaderMap::new(); + headers.insert("Metadata-Flavor", HeaderValue::from_static("Google")); + let response = send_request_with_retries( + &prover_group_config.region_read_url, + 5, + Method::GET, + Some(headers), + None, + ) + .await; + response + .unwrap_or_else(|_| { + panic!( + "Failed fetching response from url: {}", + prover_group_config.region_read_url + ) + }) + .text() + .await + .expect("Failed to read response as text") +} diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 034de9ffe75e..771e3f039c3b 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::fmt::Debug; -use zksync_types::{tokens::TokenInfo, Address, StorageKey, StorageValue, ZkSyncReadStorage, H256}; +use zksync_types::{StorageKey, StorageValue, ZkSyncReadStorage, H256}; /// `StorageView` is buffer for `StorageLog`s between storage and transaction execution code. /// In order to commit transactions logs should be submitted @@ -17,15 +17,11 @@ pub struct StorageView { read_storage_keys: HashMap, // Cache for initial/repeated writes. 
It's only valid within one L1 batch execution. read_initial_writes: HashMap, - deployed_contracts: HashMap>, - added_tokens: Vec, - new_factory_deps: HashMap>, pub storage_invocations: usize, pub new_storage_invocations: usize, pub get_value_storage_invocations: usize, pub set_value_storage_invocations: usize, - pub contract_load_invocations: usize, } impl StorageView { @@ -35,12 +31,8 @@ impl StorageView { modified_storage_keys: HashMap::new(), read_storage_keys: HashMap::new(), read_initial_writes: HashMap::new(), - deployed_contracts: HashMap::new(), - new_factory_deps: HashMap::new(), - added_tokens: vec![], storage_invocations: 0, get_value_storage_invocations: 0, - contract_load_invocations: 0, set_value_storage_invocations: 0, new_storage_invocations: 0, } @@ -108,29 +100,14 @@ impl StorageView { &self.modified_storage_keys } - pub fn save_token(&mut self, token: TokenInfo) { - self.added_tokens.push(token); - } - - pub fn save_contract(&mut self, address: Address, bytecode: Vec) { - self.deployed_contracts.insert(address, bytecode); - } - - pub fn load_contract(&mut self, address: Address) -> Option> { - self.contract_load_invocations += 1; - self.storage_handle - .load_contract(address) - .or_else(|| self.deployed_contracts.get(&address).cloned()) - } - pub fn load_factory_dep(&mut self, hash: H256) -> Option> { - self.storage_handle - .load_factory_dep(hash) - .or_else(|| self.new_factory_deps.get(&hash).cloned()) + self.storage_handle.load_factory_dep(hash) } - pub fn save_factory_dep(&mut self, hash: H256, bytecode: Vec) { - self.new_factory_deps.insert(hash, bytecode); + pub fn get_cache_size(&self) -> usize { + self.modified_storage_keys.len() * std::mem::size_of::<(StorageKey, StorageValue)>() + + self.read_initial_writes.len() * std::mem::size_of::<(StorageKey, bool)>() + + self.read_storage_keys.len() * std::mem::size_of::<(StorageKey, StorageValue)>() } } @@ -141,7 +118,7 @@ mod test { use tempfile::TempDir; use zksync_storage::db::Database; use 
zksync_storage::RocksDB; - use zksync_types::{AccountTreeId, H256}; + use zksync_types::{AccountTreeId, Address, H256}; use zksync_utils::u32_to_h256; #[test] diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index b50dc967c994..3e47f92a946b 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Formatter}; use std::ops::{Add, AddAssign}; use zksync_basic_types::{H2048, H256, U256}; -use zksync_config::constants::FAIR_L2_GAS_PRICE; +use zksync_contracts::BaseSystemContractsHashes; use crate::{ l2_to_l1_log::L2ToL1Log, priority_op_onchain_data::PriorityOpOnchainData, @@ -59,6 +59,7 @@ pub struct L1BatchHeader { pub l1_gas_price: u64, /// The L2 gas price that the operator agrees on. pub l2_fair_gas_price: u64, + pub base_system_contracts_hashes: BaseSystemContractsHashes, } /// Holder for the miniblock metadata that is not available from transactions themselves. @@ -73,6 +74,7 @@ pub struct MiniblockHeader { pub l1_gas_price: u64, // L1 gas price assumed in the corresponding batch pub l2_fair_gas_price: u64, // L2 gas price assumed in the corresponding batch + pub base_system_contracts_hashes: BaseSystemContractsHashes, } impl L1BatchHeader { @@ -80,6 +82,7 @@ impl L1BatchHeader { number: L1BatchNumber, timestamp: u64, fee_account_address: Address, + base_system_contracts_hashes: BaseSystemContractsHashes, ) -> L1BatchHeader { Self { number, @@ -94,19 +97,13 @@ impl L1BatchHeader { bloom: H2048::default(), initial_bootloader_contents: vec![], used_contract_hashes: vec![], - // For now, base fee is always equal to the minimal one. - base_fee_per_gas: FAIR_L2_GAS_PRICE, + base_fee_per_gas: 0, l1_gas_price: 0, - l2_fair_gas_price: FAIR_L2_GAS_PRICE, + l2_fair_gas_price: 0, + base_system_contracts_hashes, } } - /// Mock block header, existing only for tests. 
- #[doc(hidden)] - pub fn mock(number: L1BatchNumber) -> Self { - Self::new(number, 0, Address::default()) - } - /// Creates a hash of the priority ops data. pub fn priority_ops_onchain_data_hash(&self) -> H256 { let mut rolling_hash: H256 = keccak256(&[]).into(); diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 6f149661ac8f..559700b15259 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -10,11 +10,9 @@ use std::collections::HashMap; use std::fmt::Debug; use serde::{Deserialize, Serialize}; -use zksync_contracts::{DEFAULT_ACCOUNT_CODE, PROVED_BLOCK_BOOTLOADER_CODE}; use zksync_config::constants::ZKPORTER_IS_AVAILABLE; use zksync_mini_merkle_tree::mini_merkle_tree_root_hash; -use zksync_utils::u256_to_h256; use crate::circuit::GEOMETRY_CONFIG; use crate::ethabi::Token; @@ -69,27 +67,6 @@ pub struct BlockMetadata { pub pass_through_data_hash: H256, } -impl BlockMetadata { - /// Mock metadata, exists only for tests. 
- #[doc(hidden)] - pub fn mock() -> Self { - Self { - root_hash: H256::zero(), - rollup_last_leaf_index: 1, - merkle_root_hash: H256::zero(), - initial_writes_compressed: vec![], - repeated_writes_compressed: vec![], - commitment: Default::default(), - l2_l1_messages_compressed: vec![], - l2_l1_merkle_root: H256::default(), - block_meta_params: BlockMetaParameters::default(), - aux_data_hash: Default::default(), - meta_parameters_hash: Default::default(), - pass_through_data_hash: Default::default(), - } - } -} - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BlockWithMetadata { pub header: L1BatchHeader, @@ -331,16 +308,6 @@ pub struct BlockMetaParameters { pub default_aa_code_hash: H256, } -impl Default for BlockMetaParameters { - fn default() -> Self { - Self { - zkporter_is_available: ZKPORTER_IS_AVAILABLE, - bootloader_code_hash: u256_to_h256(PROVED_BLOCK_BOOTLOADER_CODE.hash), - default_aa_code_hash: u256_to_h256(DEFAULT_ACCOUNT_CODE.hash), - } - } -} - impl BlockMetaParameters { pub fn to_bytes(&self) -> Vec { const SERIALIZED_SIZE: usize = 4 + 1 + 32 + 32; @@ -411,8 +378,14 @@ impl BlockCommitment { rollup_root_hash: H256, initial_writes: Vec, repeated_writes: Vec, + bootloader_code_hash: H256, + default_aa_code_hash: H256, ) -> Self { - let meta_parameters = BlockMetaParameters::default(); + let meta_parameters = BlockMetaParameters { + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + bootloader_code_hash, + default_aa_code_hash, + }; Self { pass_through_data: BlockPassThroughData { diff --git a/core/lib/types/src/event.rs b/core/lib/types/src/event.rs index 6e183bb935d4..1a554bc009f0 100644 --- a/core/lib/types/src/event.rs +++ b/core/lib/types/src/event.rs @@ -52,7 +52,7 @@ static L1_MESSAGE_EVENT_SIGNATURE: Lazy = Lazy::new(|| { ) }); -static BRIDGE_INITIALIZATION_SIGNATURE: Lazy = Lazy::new(|| { +static BRIDGE_INITIALIZATION_SIGNATURE_OLD: Lazy = Lazy::new(|| { ethabi::long_signature( "BridgeInitialization", &[ @@ -64,6 +64,18 @@ 
static BRIDGE_INITIALIZATION_SIGNATURE: Lazy = Lazy::new(|| { ) }); +static BRIDGE_INITIALIZATION_SIGNATURE_NEW: Lazy = Lazy::new(|| { + ethabi::long_signature( + "BridgeInitialize", + &[ + ethabi::ParamType::Address, + ethabi::ParamType::String, + ethabi::ParamType::String, + ethabi::ParamType::Uint(8), + ], + ) +}); + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { ethabi::long_signature( "MarkedAsKnown", @@ -103,7 +115,8 @@ fn extract_added_token_info_from_addresses( .iter() .find(|event| { event.address == l2_token_address - && event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE + && (event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_NEW + || event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_OLD) }) .map(|event| { let l1_token_address = h256_to_account_address(&event.indexed_topics[1]); diff --git a/core/lib/types/src/explorer_api.rs b/core/lib/types/src/explorer_api.rs index 03f9780b2f33..9ed08b3e112e 100644 --- a/core/lib/types/src/explorer_api.rs +++ b/core/lib/types/src/explorer_api.rs @@ -1,5 +1,6 @@ use serde::de::{Deserializer, Error, MapAccess, Unexpected, Visitor}; use std::{collections::HashMap, fmt}; +use zksync_contracts::BaseSystemContractsHashes; use bigdecimal::BigDecimal; use chrono::{DateTime, Utc}; @@ -40,6 +41,14 @@ pub struct BlocksQuery { pub pagination: PaginationQuery, } +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct L1BatchesQuery { + pub from: Option, + #[serde(flatten)] + pub pagination: PaginationQuery, +} + #[derive(Debug, Clone, Copy)] pub struct TxPosition { pub block_number: MiniblockNumber, @@ -52,6 +61,7 @@ pub struct TransactionsQuery { pub from_block_number: Option, pub from_tx_index: Option, pub block_number: Option, + pub l1_batch_number: Option, pub address: Option
, pub account_address: Option
, pub contract_address: Option
, @@ -260,6 +270,27 @@ pub struct ContractBasicInfo { #[serde(rename_all = "camelCase")] pub struct BlockDetails { pub number: MiniblockNumber, + pub l1_batch_number: L1BatchNumber, + pub timestamp: u64, + pub l1_tx_count: usize, + pub l2_tx_count: usize, + pub root_hash: Option, + pub status: BlockStatus, + pub commit_tx_hash: Option, + pub committed_at: Option>, + pub prove_tx_hash: Option, + pub proven_at: Option>, + pub execute_tx_hash: Option, + pub executed_at: Option>, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub base_system_contracts_hashes: BaseSystemContractsHashes, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct L1BatchDetails { + pub number: L1BatchNumber, pub timestamp: u64, pub l1_tx_count: usize, pub l2_tx_count: usize, @@ -271,6 +302,20 @@ pub struct BlockDetails { pub proven_at: Option>, pub execute_tx_hash: Option, pub executed_at: Option>, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub base_system_contracts_hashes: BaseSystemContractsHashes, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct L1BatchPageItem { + pub number: L1BatchNumber, + pub timestamp: u64, + pub l1_tx_count: usize, + pub l2_tx_count: usize, + pub root_hash: Option, + pub status: BlockStatus, } #[derive(Debug, Clone, Serialize)] @@ -360,6 +405,8 @@ pub struct VerificationIncomingRequest { pub optimization_used: bool, #[serde(default)] pub constructor_arguments: Bytes, + #[serde(default)] + pub is_system: bool, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index 3f5605235aa6..62fa203300fa 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -44,8 +44,8 @@ impl Fee { assert!(block_base_fee_per_gas <= self.max_fee_per_gas); assert!(self.max_priority_fee_per_gas <= self.max_fee_per_gas); - let max_that_operator_could_take = block_base_fee_per_gas + 
self.max_priority_fee_per_gas; - std::cmp::min(max_that_operator_could_take, self.max_fee_per_gas) + // For now, we charge only for base fee. + block_base_fee_per_gas } } diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 521839a174d1..64babf7a9673 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -87,6 +87,8 @@ pub struct L1TxCommonData { pub layer_2_tip_fee: U256, /// The total cost the sender paid for the transaction. pub full_fee: U256, + /// The maximal fee per gas to be used for L1->L2 transaction + pub max_fee_per_gas: U256, /// The maximum number of gas that a transaction can spend at a price of gas equals 1. pub gas_limit: U256, /// The maximum number of gas per 1 byte of pubdata. @@ -262,7 +264,6 @@ impl TryFrom for L1Tx { let gas_per_pubdata_limit = transaction.remove(0).into_uint().unwrap(); let max_fee_per_gas = transaction.remove(0).into_uint().unwrap(); - assert_eq!(max_fee_per_gas, U256::zero()); let max_priority_fee_per_gas = transaction.remove(0).into_uint().unwrap(); assert_eq!(max_priority_fee_per_gas, U256::zero()); @@ -324,6 +325,7 @@ impl TryFrom for L1Tx { refund_recipient, full_fee: U256::zero(), gas_limit, + max_fee_per_gas, gas_per_pubdata_limit, op_processing_type: OpProcessingType::Common, priority_queue_type: PriorityQueueType::Deque, diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 380812147e06..20fc17fdee14 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -1,11 +1,12 @@ -use rlp::RlpStream; +use rlp::{Rlp, RlpStream}; use self::error::SignError; use crate::transaction_request::PaymasterParams; use crate::{ - api::Eip712Meta, tx::primitives::PackedEthSignature, tx::Execute, web3::types::U64, Address, - Bytes, EIP712TypedStructure, Eip712Domain, ExecuteTransactionCommon, InputData, L2ChainId, - Nonce, StructBuilder, Transaction, EIP_712_TX_TYPE, H256, PRIORITY_OPERATION_L2_TX_TYPE, U256, + api, 
tx::primitives::PackedEthSignature, tx::Execute, web3::types::U64, Address, Bytes, + EIP712TypedStructure, Eip712Domain, ExecuteTransactionCommon, InputData, L2ChainId, Nonce, + StructBuilder, Transaction, EIP_1559_TX_TYPE, EIP_712_TX_TYPE, H256, + PRIORITY_OPERATION_L2_TX_TYPE, U256, }; use serde::{Deserialize, Serialize}; @@ -189,6 +190,27 @@ impl L2Tx { self.common_data.set_input(data, hash) } + pub fn extract_chain_id(&self) -> Option { + let bytes = self.common_data.input_data()?; + let chain_id = match bytes.first() { + Some(x) if *x >= 0x80 => { + let rlp = Rlp::new(&bytes); + let v = rlp.val_at(6).ok()?; + PackedEthSignature::unpack_v(v).ok()?.1.unwrap_or(0) + } + Some(x) if *x == EIP_1559_TX_TYPE => { + let rlp = Rlp::new(&bytes[1..]); + rlp.val_at(0).ok()? + } + Some(x) if *x == EIP_712_TX_TYPE => { + let rlp = Rlp::new(&bytes[1..]); + rlp.val_at(10).ok()? + } + _ => return None, + }; + Some(chain_id) + } + pub fn get_rlp_bytes(&self, chain_id: L2ChainId) -> Bytes { let mut rlp_stream = RlpStream::new(); let tx: TransactionRequest = self.clone().into(); @@ -281,7 +303,7 @@ impl From for TransactionRequest { Some(U64::from(tx_type)) }, access_list: None, - eip712_meta: Some(Eip712Meta { + eip712_meta: Some(api::Eip712Meta { gas_per_pubdata: tx.common_data.fee.gas_per_pubdata_limit, factory_deps: tx.execute.factory_deps, custom_signature: Some(tx.common_data.signature), @@ -307,6 +329,45 @@ impl From for Transaction { } } +impl From for api::Transaction { + fn from(tx: L2Tx) -> Self { + let tx_type = tx.common_data.transaction_type as u32; + let (v, r, s) = + if let Ok(sig) = PackedEthSignature::deserialize_packed(&tx.common_data.signature) { + ( + Some(U64::from(sig.v())), + Some(U256::from(sig.r())), + Some(U256::from(sig.s())), + ) + } else { + (None, None, None) + }; + + Self { + hash: tx.hash(), + chain_id: tx.extract_chain_id().unwrap_or_default().into(), + nonce: U256::from(tx.common_data.nonce.0), + from: Some(tx.common_data.initiator_address), + 
to: Some(tx.recipient_account()), + value: tx.execute.value, + gas_price: Some(tx.common_data.fee.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.common_data.fee.max_priority_fee_per_gas), + max_fee_per_gas: Some(tx.common_data.fee.max_fee_per_gas), + gas: tx.common_data.fee.gas_limit, + input: Bytes(tx.execute.calldata), + v, + r, + s, + transaction_type: if tx_type == 0 { + None + } else { + Some(U64::from(tx_type)) + }, + ..Default::default() + } + } +} + impl EIP712TypedStructure for L2Tx { const TYPE_NAME: &'static str = "Transaction"; diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index f0f12d018593..67661c3c7d46 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,6 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] +use fee::encoding_len; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -36,6 +37,7 @@ pub mod l2_to_l1_log; pub mod priority_op_onchain_data; pub mod pubdata_packing; pub mod storage; +pub mod storage_writes_deduplicator; pub mod system_contracts; pub mod tokens; pub mod tx; @@ -44,11 +46,9 @@ pub mod vm_trace; pub mod api; pub mod eth_sender; pub mod helpers; -pub mod log_query_sorter; pub mod proofs; pub mod transaction_request; pub mod utils; - /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. pub const EIP_712_TX_TYPE: u8 = 0x71; @@ -64,13 +64,19 @@ pub const LEGACY_TX_TYPE: u8 = 0x0; /// Denotes the first byte of some legacy transaction, which type is unknown to the server. 
pub const PRIORITY_OPERATION_L2_TX_TYPE: u8 = 0xff; -#[derive(Debug, Clone)] +#[derive(Clone, Serialize, Deserialize)] pub struct Transaction { pub common_data: ExecuteTransactionCommon, pub execute: Execute, pub received_timestamp_ms: u64, } +impl std::fmt::Debug for Transaction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("Transaction").field(&self.hash()).finish() + } +} + impl PartialEq for Transaction { fn eq(&self, other: &Transaction) -> bool { self.hash() == other.hash() @@ -109,9 +115,7 @@ impl Transaction { ExecuteTransactionCommon::L2(_) => "l2_transaction", } } -} -impl Transaction { pub fn hash(&self) -> H256 { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.hash(), @@ -127,12 +131,67 @@ impl Transaction { } } + /// Returns the payer for L2 transaction and 0 for L1 transactions + pub fn payer(&self) -> Address { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => data.sender, + ExecuteTransactionCommon::L2(data) => { + let paymaster = data.paymaster_params.paymaster; + if paymaster == Address::default() { + data.initiator_address + } else { + paymaster + } + } + } + } + pub fn gas_limit(&self) -> U256 { match &self.common_data { ExecuteTransactionCommon::L1(data) => data.gas_limit, ExecuteTransactionCommon::L2(data) => data.fee.gas_limit, } } + + pub fn max_fee_per_gas(&self) -> U256 { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => data.max_fee_per_gas, + ExecuteTransactionCommon::L2(data) => data.fee.max_fee_per_gas, + } + } + + pub fn gas_per_pubdata_byte_limit(&self) -> U256 { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => data.gas_per_pubdata_limit, + ExecuteTransactionCommon::L2(data) => data.fee.gas_per_pubdata_limit, + } + } + + // Returns how many slots it takes to encode the transaction + pub fn encoding_len(&self) -> usize { + let data_len = self.execute.calldata.len(); + let factory_deps_len = self + .execute + 
.factory_deps + .as_ref() + .map(|deps| deps.len()) + .unwrap_or_default(); + let (signature_len, paymaster_input_len) = match &self.common_data { + ExecuteTransactionCommon::L1(_) => (0, 0), + ExecuteTransactionCommon::L2(l2_common_data) => ( + l2_common_data.signature.len(), + l2_common_data.paymaster_params.paymaster_input.len(), + ), + }; + + encoding_len( + data_len as u64, + signature_len as u64, + factory_deps_len as u64, + paymaster_input_len as u64, + 0, + ) + } } /// Optional input `Ethereum`-like encoded transaction if submitted via Web3 API. diff --git a/core/lib/types/src/log_query_sorter.rs b/core/lib/types/src/log_query_sorter.rs deleted file mode 100644 index 928c824380b4..000000000000 --- a/core/lib/types/src/log_query_sorter.rs +++ /dev/null @@ -1,312 +0,0 @@ -// copied from https://github.com/matter-labs/zkevm_test_harness/blob/main/src/witness/sort_storage_access.rs - -use std::cmp::Ordering; - -use zk_evm::aux_structures::LogQuery; - -use rayon::slice::ParallelSliceMut; - -use crate::{StorageLogQuery, StorageLogQueryType, U256}; - -#[derive(Debug, Clone, Copy)] -pub struct LogQueryWithExtendedEnumeration { - pub raw_query: StorageLogQuery, - pub extended_timestamp: u32, -} - -#[derive(Debug)] -pub struct StorageSlotHistoryKeeper { - pub initial_value: Option, - pub current_value: Option, - pub changes_stack: Vec, - pub did_read_at_depth_zero: bool, - pub minimum_log_type: StorageLogQueryType, -} - -#[allow(clippy::all)] -pub fn sort_storage_access_queries( - unsorted_storage_queries: &[StorageLogQuery], -) -> (Vec, Vec) { - let mut sorted_storage_queries_with_extra_timestamp: Vec<_> = unsorted_storage_queries - .iter() - .enumerate() - .map(|(i, el)| LogQueryWithExtendedEnumeration { - raw_query: el.clone(), - extended_timestamp: i as u32, - }) - .collect(); - - sorted_storage_queries_with_extra_timestamp.par_sort_by(|a, b| { - match a - .raw_query - .log_query - .shard_id - .cmp(&a.raw_query.log_query.shard_id) - { - Ordering::Equal => match a 
- .raw_query - .log_query - .address - .cmp(&b.raw_query.log_query.address) - { - Ordering::Equal => { - match a.raw_query.log_query.key.cmp(&b.raw_query.log_query.key) { - Ordering::Equal => a.extended_timestamp.cmp(&b.extended_timestamp), - r @ _ => r, - } - } - r @ _ => r, - }, - r @ _ => r, - } - }); - - let mut deduplicated_storage_queries = vec![]; - - // now just implement the logic to sort and deduplicate - let mut it = sorted_storage_queries_with_extra_timestamp - .iter() - .peekable(); - - loop { - if it.peek().is_none() { - break; - } - - let candidate = it.peek().unwrap().clone(); - - let subit = it.clone().take_while(|el| { - el.raw_query.log_query.shard_id == candidate.raw_query.log_query.shard_id - && el.raw_query.log_query.address == candidate.raw_query.log_query.address - && el.raw_query.log_query.key == candidate.raw_query.log_query.key - }); - - let mut current_element_history = StorageSlotHistoryKeeper { - initial_value: None, - current_value: None, - changes_stack: vec![], - did_read_at_depth_zero: false, - minimum_log_type: StorageLogQueryType::RepeatedWrite, - }; - let mut last_write_is_rollback = false; - - for (_idx, el) in subit.enumerate() { - let _ = it.next().unwrap(); - - if current_element_history.current_value.is_none() { - assert!( - current_element_history.initial_value.is_none(), - "invalid for query {:?}", - el - ); - // first read potentially - if el.raw_query.log_query.rw_flag == false { - current_element_history.did_read_at_depth_zero = true; - } - } else { - // explicit read at zero - if el.raw_query.log_query.rw_flag == false - && current_element_history.changes_stack.is_empty() - { - current_element_history.did_read_at_depth_zero = true; - } - } - - if current_element_history.current_value.is_none() { - assert!( - current_element_history.initial_value.is_none(), - "invalid for query {:?}", - el - ); - if el.raw_query.log_query.rw_flag == false { - current_element_history.initial_value = 
Some(el.raw_query.log_query.read_value); - current_element_history.current_value = Some(el.raw_query.log_query.read_value); - } else { - assert!(el.raw_query.log_query.rollback == false); - current_element_history.initial_value = Some(el.raw_query.log_query.read_value); - current_element_history.current_value = Some(el.raw_query.log_query.read_value); - // note: We apply updates few lines later - } - } - - if el.raw_query.log_query.rw_flag == false { - assert_eq!( - &el.raw_query.log_query.read_value, - current_element_history.current_value.as_ref().unwrap(), - "invalid for query {:?}", - el - ); - - // and do not place reads into the stack - } else if el.raw_query.log_query.rw_flag == true { - if matches!(el.raw_query.log_type, StorageLogQueryType::InitialWrite) { - current_element_history.minimum_log_type = StorageLogQueryType::InitialWrite - } - // write-like things manipulate the stack - if el.raw_query.log_query.rollback == false { - last_write_is_rollback = false; - // write - assert_eq!( - &el.raw_query.log_query.read_value, - current_element_history.current_value.as_ref().unwrap(), - "invalid for query {:?}", - el - ); - current_element_history.current_value = - Some(el.raw_query.log_query.written_value); - current_element_history.changes_stack.push(el.clone()); - } else { - last_write_is_rollback = true; - // pop from stack - let popped_change = current_element_history.changes_stack.pop().unwrap(); - // we do not explicitly swap values, and use rollback flag instead, so compare this way - assert_eq!( - el.raw_query.log_query.read_value, - popped_change.raw_query.log_query.read_value, - "invalid for query {:?}", - el - ); - assert_eq!( - el.raw_query.log_query.written_value, - popped_change.raw_query.log_query.written_value, - "invalid for query {:?}", - el - ); - assert_eq!( - &el.raw_query.log_query.written_value, - current_element_history.current_value.as_ref().unwrap(), - "invalid for query {:?}", - el - ); - // check that we properly apply rollbacks - 
assert_eq!( - el.raw_query.log_query.shard_id, popped_change.raw_query.log_query.shard_id, - "invalid for query {:?}", - el - ); - assert_eq!( - el.raw_query.log_query.address, popped_change.raw_query.log_query.address, - "invalid for query {:?}", - el - ); - assert_eq!( - el.raw_query.log_query.key, popped_change.raw_query.log_query.key, - "invalid for query {:?}", - el - ); - // apply rollback - current_element_history.current_value = Some(el.raw_query.log_query.read_value); - // our convension - } - } - } - - use zk_evm::aux_structures::Timestamp; - - if current_element_history.did_read_at_depth_zero == false - && current_element_history.changes_stack.is_empty() - { - // whatever happened there didn't produce any final changes - assert_eq!( - current_element_history.initial_value.unwrap(), - current_element_history.current_value.unwrap() - ); - assert!(last_write_is_rollback == true); - // here we know that last write was a rollback, and there we no reads after it (otherwise "did_read_at_depth_zero" == true), - // so whatever was an initial value in storage slot it's not ever observed, and we do not need to issue even read here - continue; - } else { - if current_element_history.initial_value.unwrap() - == current_element_history.current_value.unwrap() - { - // no change, but we may need protective read - if current_element_history.did_read_at_depth_zero { - // protective read - let sorted_log_query = StorageLogQuery { - log_query: LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: 0, - aux_byte: 0, - shard_id: candidate.raw_query.log_query.shard_id, - address: candidate.raw_query.log_query.address, - key: candidate.raw_query.log_query.key, - read_value: current_element_history.initial_value.unwrap(), - written_value: current_element_history.current_value.unwrap(), - rw_flag: false, - rollback: false, - is_service: false, - }, - log_type: StorageLogQueryType::Read, - }; - - deduplicated_storage_queries.push(sorted_log_query); - } else { - // we didn't 
read at depth zero, so it's something like - // - write cell from a into b - // .... - // - write cell from b into a - - // There is a catch here: - // - if it's two "normal" writes, then operator can claim that initial value - // was "a", but it could have been some other, and in this case we want to - // "read" that it was indeed "a" - // - but if the latest "write" was just a rollback, - // then we know that it's basically NOP. We already had a branch above that - // protects us in case of write - rollback - read, so we only need to degrade write into - // read here if the latest write wasn't a rollback - - if current_element_history.changes_stack.is_empty() == false { - // it means that we did accumlate some changes - // degrade to protective read - let sorted_log_query = StorageLogQuery { - log_query: LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: 0, - aux_byte: 0, - shard_id: candidate.raw_query.log_query.shard_id, - address: candidate.raw_query.log_query.address, - key: candidate.raw_query.log_query.key, - read_value: current_element_history.initial_value.unwrap(), - written_value: current_element_history.current_value.unwrap(), - rw_flag: false, - rollback: false, - is_service: false, - }, - log_type: StorageLogQueryType::Read, - }; - - deduplicated_storage_queries.push(sorted_log_query); - } else { - //do nothing - } - } - } else { - // it's final net write - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: 0, - aux_byte: 0, - shard_id: candidate.raw_query.log_query.shard_id, - address: candidate.raw_query.log_query.address, - key: candidate.raw_query.log_query.key, - read_value: current_element_history.initial_value.unwrap(), - written_value: current_element_history.current_value.unwrap(), - rw_flag: true, - rollback: false, - is_service: false, - }; - let sorted_log_query = StorageLogQuery { - log_query: sorted_log_query, - log_type: current_element_history.minimum_log_type, - }; - - 
deduplicated_storage_queries.push(sorted_log_query); - } - } - } - - ( - sorted_storage_queries_with_extra_timestamp, - deduplicated_storage_queries, - ) -} diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs index ab1f2eafbb29..ab328031f3b5 100644 --- a/core/lib/types/src/proofs.rs +++ b/core/lib/types/src/proofs.rs @@ -14,7 +14,7 @@ use zkevm_test_harness::{ LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, SchedulerCircuitInstanceWitness, }; -use zksync_basic_types::{L1BatchNumber, U256}; +use zksync_basic_types::{L1BatchNumber, H256, U256}; /// Metadata emitted by merkle tree after processing single storage log #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] @@ -130,6 +130,7 @@ pub struct PrepareBasicCircuitsJob { #[derive(Clone)] pub struct BasicCircuitWitnessGeneratorInput { pub block_number: L1BatchNumber, + pub previous_block_hash: H256, pub previous_block_timestamp: u64, pub block_timestamp: u64, pub used_bytecodes_hashes: Vec, diff --git a/core/lib/types/src/storage/log.rs b/core/lib/types/src/storage/log.rs index b94da2a1ea19..91b45007c4fc 100644 --- a/core/lib/types/src/storage/log.rs +++ b/core/lib/types/src/storage/log.rs @@ -1,7 +1,5 @@ -use crate::{H160, U256}; use serde::{Deserialize, Serialize}; -use zk_evm::aux_structures::{LogQuery, Timestamp}; -use zkevm_test_harness::witness::sort_storage_access::LogQueryLike; +use zk_evm::aux_structures::LogQuery; use zksync_basic_types::AccountTreeId; use zksync_utils::u256_to_h256; @@ -83,59 +81,3 @@ pub struct StorageLogQuery { pub log_query: LogQuery, pub log_type: StorageLogQueryType, } - -impl LogQueryLike for StorageLogQuery { - fn shard_id(&self) -> u8 { - self.log_query.shard_id - } - - fn address(&self) -> H160 { - self.log_query.address - } - - fn key(&self) -> U256 { - self.log_query.key - } - - fn rw_flag(&self) -> bool { - self.log_query.rw_flag - } - - fn rollback(&self) -> bool { - self.log_query.rollback - } - - fn read_value(&self) -> U256 { 
- self.log_query.read_value - } - - fn written_value(&self) -> U256 { - self.log_query.written_value - } - - fn create_partially_filled_from_fields( - shard_id: u8, - address: H160, - key: U256, - read_value: U256, - written_value: U256, - rw_flag: bool, - ) -> Self { - Self { - log_type: StorageLogQueryType::Read, - log_query: LogQuery { - timestamp: Timestamp::empty(), - tx_number_in_block: 0, - aux_byte: 0, - shard_id, - address, - key, - read_value, - written_value, - rw_flag, - rollback: false, - is_service: false, - }, - } - } -} diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/types/src/storage_writes_deduplicator.rs new file mode 100644 index 000000000000..1644aaa06769 --- /dev/null +++ b/core/lib/types/src/storage_writes_deduplicator.rs @@ -0,0 +1,276 @@ +use crate::tx::tx_execution_info::DeduplicatedWritesMetrics; +use crate::{AccountTreeId, StorageKey, StorageLogQuery, StorageLogQueryType, U256}; +use std::collections::{HashMap, HashSet}; +use zksync_utils::u256_to_h256; + +#[derive(Debug, Clone, Copy, PartialEq)] +struct UpdateItem { + key: StorageKey, + is_insertion: bool, + is_write_initial: bool, +} + +/// Struct that allows to deduplicate storage writes in-flight. +#[derive(Debug, Clone, PartialEq, Default)] +pub struct StorageWritesDeduplicator { + initial_values: HashMap, + modified_keys: HashSet, + metrics: DeduplicatedWritesMetrics, +} + +impl StorageWritesDeduplicator { + pub fn new() -> Self { + Self::default() + } + + pub fn metrics(&self) -> DeduplicatedWritesMetrics { + self.metrics + } + + /// Applies storage logs to the state. + pub fn apply<'a, I: IntoIterator>(&mut self, logs: I) { + self.process_storage_logs(logs); + } + + /// Returns metrics as if provided storage logs are applied to the state. + /// It's implemented in the following way: apply logs -> save current metrics -> rollback logs. 
+ pub fn apply_and_rollback<'a, I: IntoIterator>( + &mut self, + logs: I, + ) -> DeduplicatedWritesMetrics { + let updates = self.process_storage_logs(logs); + let metrics = self.metrics; + self.rollback(updates); + metrics + } + + /// Applies logs to the empty state and returns metrics. + pub fn apply_on_empty_state<'a, I: IntoIterator>( + logs: I, + ) -> DeduplicatedWritesMetrics { + let mut deduplicator = Self::new(); + deduplicator.apply(logs); + deduplicator.metrics + } + + /// Processes storage logs and returns updates for `modified_keys` and `metrics` fields. + /// Metrics can be used later to rollback the state. + /// We don't care about `initial_values` changes as we only inserted values there and they are always valid. + fn process_storage_logs<'a, I: IntoIterator>( + &mut self, + logs: I, + ) -> Vec { + let mut updates = Vec::new(); + for log in logs.into_iter().filter(|log| log.log_query.rw_flag) { + let key = StorageKey::new( + AccountTreeId::new(log.log_query.address), + u256_to_h256(log.log_query.key), + ); + let initial_value = *self + .initial_values + .entry(key) + .or_insert(log.log_query.read_value); + + let was_key_modified = self.modified_keys.get(&key).is_some(); + let is_key_modified = if log.log_query.rollback { + initial_value != log.log_query.read_value + } else { + initial_value != log.log_query.written_value + }; + + let is_write_initial = log.log_type == StorageLogQueryType::InitialWrite; + let field_to_change = if is_write_initial { + &mut self.metrics.initial_storage_writes + } else { + &mut self.metrics.repeated_storage_writes + }; + + match (was_key_modified, is_key_modified) { + (true, false) => { + self.modified_keys.remove(&key); + *field_to_change -= 1; + updates.push(UpdateItem { + key, + is_insertion: false, + is_write_initial, + }); + } + (false, true) => { + self.modified_keys.insert(key); + *field_to_change += 1; + updates.push(UpdateItem { + key, + is_insertion: true, + is_write_initial, + }); + } + _ => {} + } + } + 
updates + } + + fn rollback(&mut self, updates: Vec) { + for item in updates.into_iter().rev() { + let field_to_change = if item.is_write_initial { + &mut self.metrics.initial_storage_writes + } else { + &mut self.metrics.repeated_storage_writes + }; + + if item.is_insertion { + self.modified_keys.remove(&item.key); + *field_to_change -= 1; + } else { + self.modified_keys.insert(item.key); + *field_to_change += 1; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use zk_evm::aux_structures::{LogQuery, Timestamp}; + + fn storage_log_query( + key: U256, + read_value: U256, + written_value: U256, + rollback: bool, + is_initial: bool, + ) -> StorageLogQuery { + let log_type = if is_initial { + StorageLogQueryType::InitialWrite + } else { + StorageLogQueryType::RepeatedWrite + }; + StorageLogQuery { + log_query: LogQuery { + timestamp: Timestamp(0), + tx_number_in_block: 0, + aux_byte: 0, + shard_id: 0, + address: Default::default(), + key, + read_value, + written_value, + rw_flag: true, + rollback, + is_service: false, + }, + log_type, + } + } + + #[test] + fn storage_writes_deduplicator() { + // Each test scenario is a tuple (input, expected output, description). 
+ let scenarios: Vec<(Vec, DeduplicatedWritesMetrics, String)> = vec![ + ( + vec![storage_log_query( + 0u32.into(), + 0u32.into(), + 1u32.into(), + false, + true, + )], + DeduplicatedWritesMetrics { + initial_storage_writes: 1, + repeated_storage_writes: 0, + }, + "single initial write".into(), + ), + ( + vec![ + storage_log_query(0u32.into(), 0u32.into(), 1u32.into(), false, true), + storage_log_query(1u32.into(), 0u32.into(), 1u32.into(), false, false), + ], + DeduplicatedWritesMetrics { + initial_storage_writes: 1, + repeated_storage_writes: 1, + }, + "initial and repeated write".into(), + ), + ( + vec![ + storage_log_query(0u32.into(), 0u32.into(), 1u32.into(), false, true), + storage_log_query(0u32.into(), 0u32.into(), 1u32.into(), true, true), + ], + DeduplicatedWritesMetrics { + initial_storage_writes: 0, + repeated_storage_writes: 0, + }, + "single rollback".into(), + ), + ( + vec![storage_log_query( + 0u32.into(), + 10u32.into(), + 10u32.into(), + false, + true, + )], + DeduplicatedWritesMetrics { + initial_storage_writes: 0, + repeated_storage_writes: 0, + }, + "idle write".into(), + ), + ( + vec![ + storage_log_query(0u32.into(), 0u32.into(), 1u32.into(), false, true), + storage_log_query(0u32.into(), 1u32.into(), 2u32.into(), false, true), + storage_log_query(0u32.into(), 2u32.into(), 0u32.into(), false, true), + ], + DeduplicatedWritesMetrics { + initial_storage_writes: 0, + repeated_storage_writes: 0, + }, + "idle write cycle".into(), + ), + ( + vec![ + storage_log_query(0u32.into(), 5u32.into(), 10u32.into(), false, true), + storage_log_query(1u32.into(), 1u32.into(), 2u32.into(), false, true), + storage_log_query(0u32.into(), 10u32.into(), 11u32.into(), false, true), + storage_log_query(0u32.into(), 10u32.into(), 11u32.into(), true, true), + storage_log_query(2u32.into(), 0u32.into(), 10u32.into(), false, false), + storage_log_query(2u32.into(), 10u32.into(), 0u32.into(), false, false), + storage_log_query(2u32.into(), 0u32.into(), 10u32.into(), 
false, false), + ], + DeduplicatedWritesMetrics { + initial_storage_writes: 2, + repeated_storage_writes: 1, + }, + "complex".into(), + ), + ]; + + for (input, expected_metrics, descr) in scenarios { + let actual_metrics = StorageWritesDeduplicator::apply_on_empty_state(&input); + assert_eq!( + actual_metrics, expected_metrics, + "test scenario failed: {}", + descr + ); + + // Check that `apply_and_rollback` works correctly. + let mut deduplicator = StorageWritesDeduplicator::new(); + let metrics_after_application = deduplicator.apply_and_rollback(&input); + assert_eq!( + metrics_after_application, expected_metrics, + "test scenario failed for `apply_and_rollback`: {}", + descr + ); + + assert_eq!( + deduplicator.metrics, + Default::default(), + "rolled back incorrectly for scenario: {}", + descr + ) + } + } +} diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 27c84c10a13f..e23c4f886466 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,6 +1,8 @@ use zksync_basic_types::{AccountTreeId, Address, U256}; -use zksync_config::constants::{BOOTLOADER_UTILITIES_ADDRESS, EVENT_WRITER_ADDRESS}; -use zksync_contracts::read_sys_contract_bytecode; +use zksync_config::constants::{ + BOOTLOADER_UTILITIES_ADDRESS, BYTECODE_COMPRESSOR_ADDRESS, EVENT_WRITER_ADDRESS, +}; +use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage}; use crate::{ block::DeployedContract, ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, @@ -21,32 +23,104 @@ pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { let mut deployed_system_contracts = [ - ("", "AccountCodeStorage", ACCOUNT_CODE_STORAGE_ADDRESS), - ("", "NonceHolder", NONCE_HOLDER_ADDRESS), - ("", "KnownCodesStorage", KNOWN_CODES_STORAGE_ADDRESS), + ( + "", + "AccountCodeStorage", + ACCOUNT_CODE_STORAGE_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "NonceHolder", 
+ NONCE_HOLDER_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "KnownCodesStorage", + KNOWN_CODES_STORAGE_ADDRESS, + ContractLanguage::Sol, + ), ( "", "ImmutableSimulator", IMMUTABLE_SIMULATOR_STORAGE_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "ContractDeployer", + CONTRACT_DEPLOYER_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "L1Messenger", + L1_MESSENGER_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "MsgValueSimulator", + MSG_VALUE_SIMULATOR_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "L2EthToken", + L2_ETH_TOKEN_ADDRESS, + ContractLanguage::Sol, + ), + ( + "precompiles/", + "Keccak256", + KECCAK256_PRECOMPILE_ADDRESS, + ContractLanguage::Yul, + ), + ( + "precompiles/", + "SHA256", + SHA256_PRECOMPILE_ADDRESS, + ContractLanguage::Yul, + ), + ( + "precompiles/", + "Ecrecover", + ECRECOVER_PRECOMPILE_ADDRESS, + ContractLanguage::Yul, + ), + ( + "", + "SystemContext", + SYSTEM_CONTEXT_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "EventWriter", + EVENT_WRITER_ADDRESS, + ContractLanguage::Yul, + ), + ( + "", + "BootloaderUtilities", + BOOTLOADER_UTILITIES_ADDRESS, + ContractLanguage::Sol, + ), + ( + "", + "BytecodeCompressor", + BYTECODE_COMPRESSOR_ADDRESS, + ContractLanguage::Sol, ), - ("", "ContractDeployer", CONTRACT_DEPLOYER_ADDRESS), - ("", "L1Messenger", L1_MESSENGER_ADDRESS), - ("", "MsgValueSimulator", MSG_VALUE_SIMULATOR_ADDRESS), - ("", "L2EthToken", L2_ETH_TOKEN_ADDRESS), - ("precompiles/", "Keccak256", KECCAK256_PRECOMPILE_ADDRESS), - ("precompiles/", "SHA256", SHA256_PRECOMPILE_ADDRESS), - ("precompiles/", "Ecrecover", ECRECOVER_PRECOMPILE_ADDRESS), - ("", "SystemContext", SYSTEM_CONTEXT_ADDRESS), - ("", "EventWriter", EVENT_WRITER_ADDRESS), - ("", "BootloaderUtilities", BOOTLOADER_UTILITIES_ADDRESS), ] - .map(|(path, name, address)| DeployedContract { + .map(|(path, name, address, contract_lang)| DeployedContract { account_id: AccountTreeId::new(address), - bytecode: read_sys_contract_bytecode(path, name), + bytecode: 
read_sys_contract_bytecode(path, name, contract_lang), }) .to_vec(); - let empty_bytecode = read_sys_contract_bytecode("", "EmptyContract"); + let empty_bytecode = read_sys_contract_bytecode("", "EmptyContract", ContractLanguage::Sol); // For now, only zero address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. let empty_system_contracts = diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 86321ad4b9f6..718473cdc8e5 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -1,22 +1,28 @@ +// Built-in uses use std::convert::{TryFrom, TryInto}; +// External uses use rlp::{DecoderError, Rlp, RlpStream}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tiny_keccak::keccak256; +use zk_evm::abstractions::MAX_MEMORY_BYTES; use zksync_basic_types::H256; use zksync_config::constants::MAX_GAS_PER_PUBDATA_BYTE; use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; +use zksync_utils::u256_to_h256; +use crate::l1::L1Tx; +use crate::L1TxCommonData; use crate::{ web3::types::AccessList, Address, Bytes, EIP712TypedStructure, Eip712Domain, L2ChainId, Nonce, PackedEthSignature, StructBuilder, U256, U64, }; +// Local uses use super::{EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE}; use crate::fee::Fee; use crate::l2::{L2Tx, TransactionType}; -use zksync_utils::u256_to_h256; /// Call contract request (eth_call / eth_estimateGas) /// @@ -143,7 +149,7 @@ impl CallRequestBuilder { } } -#[derive(Debug, Error)] +#[derive(Debug, Error, PartialEq)] pub enum SerializationTransactionError { #[error("transaction type is not supported")] UnknownTransactionFormat, @@ -171,6 +177,15 @@ pub enum SerializationTransactionError { AccessListsNotSupported, #[error("nonce has max value")] TooBigNonce, + /// TooHighGas is a sanity error to avoid extremely big numbers 
specified + /// to gas and pubdata price. + #[error("{0}")] + TooHighGas(String), + /// OversizedData is returned if the raw tx size is greater + /// than some meaningful limit a user might use. This is not a consensus error + /// making the transaction invalid, rather a DOS protection. + #[error("oversized data. max: {0}; actual: {0}")] + OversizedData(usize, usize), } /// Description of a Transaction, pending or in the chain. @@ -248,7 +263,6 @@ impl PaymasterParams { #[serde(rename_all = "camelCase")] pub struct Eip712Meta { pub gas_per_pubdata: U256, - // This field need to backward compatibility with the old way of withdraw. Will be deleted after public testnet #[serde(default)] pub factory_deps: Option>>, pub custom_signature: Option>, @@ -522,6 +536,7 @@ impl TransactionRequest { pub fn from_bytes( bytes: &[u8], chain_id: u16, + max_tx_size: usize, ) -> Result<(Self, H256), SerializationTransactionError> { let rlp; let mut tx = match bytes.first() { @@ -610,7 +625,6 @@ impl TransactionRequest { } _ => return Err(SerializationTransactionError::UnknownTransactionFormat), }; - let factory_deps_ref = tx .eip712_meta .as_ref() @@ -618,7 +632,6 @@ impl TransactionRequest { if let Some(deps) = factory_deps_ref { validate_factory_deps(deps)?; } - tx.raw = Some(Bytes(bytes.to_vec())); let default_signed_message = tx.get_default_signed_message(chain_id); @@ -639,6 +652,8 @@ impl TransactionRequest { H256(keccak256(bytes)) }; + check_tx_data(&tx, max_tx_size)?; + Ok((tx, hash)) } @@ -677,24 +692,42 @@ impl TransactionRequest { Ok(address) } - fn get_fee_data(&self) -> Result { + fn get_fee_data_checked(&self) -> Result { + if self.gas_price > u64::MAX.into() { + return Err(SerializationTransactionError::TooHighGas( + "max fee per gas higher than 2^64-1".to_string(), + )); + } + let gas_per_pubdata_limit = if let Some(meta) = &self.eip712_meta { + if meta.gas_per_pubdata > u64::MAX.into() { + return Err(SerializationTransactionError::TooHighGas( + "max fee per pubdata 
byte higher than 2^64-1".to_string(), + )); + } meta.gas_per_pubdata } else { // For transactions that don't support corresponding field, a default is chosen. U256::from(MAX_GAS_PER_PUBDATA_BYTE) }; + let max_priority_fee_per_gas = self.max_priority_fee_per_gas.unwrap_or(self.gas_price); + if max_priority_fee_per_gas > u64::MAX.into() { + return Err(SerializationTransactionError::TooHighGas( + "max priority fee per gas higher than 2^64-1".to_string(), + )); + } + Ok(Fee { gas_limit: self.gas, max_fee_per_gas: self.gas_price, - max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or(self.gas_price), + max_priority_fee_per_gas, gas_per_pubdata_limit, }) } fn get_nonce_checked(&self) -> Result { - if self.nonce <= u32::MAX.into() { + if self.nonce <= U256::from(u32::MAX) { Ok(Nonce(self.nonce.as_u32())) } else { Err(SerializationTransactionError::TooBigNonce) @@ -706,9 +739,8 @@ impl TryFrom for L2Tx { type Error = SerializationTransactionError; fn try_from(value: TransactionRequest) -> Result { - let fee = value.get_fee_data()?; + let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; - // Attempt to decode factory deps. 
let factory_deps = value .eip712_meta @@ -749,24 +781,6 @@ impl TryFrom for L2Tx { } } -impl From for TransactionRequest { - fn from(value: CallRequest) -> Self { - TransactionRequest { - nonce: Default::default(), - from: value.from, - to: value.to, - value: value.value.unwrap_or_default(), - gas_price: value.gas_price.unwrap_or_default(), - gas: value.gas.unwrap_or_default(), - input: value.data.unwrap_or_default(), - transaction_type: value.transaction_type, - access_list: value.access_list, - eip712_meta: value.eip712_meta, - ..Default::default() - } - } -} - impl From for CallRequest { fn from(tx: L2Tx) -> Self { let mut meta = Eip712Meta { @@ -794,11 +808,68 @@ impl From for CallRequest { } } -impl TryFrom for L2Tx { +pub fn tx_req_from_call_req( + call_request: CallRequest, + max_tx_size: usize, +) -> Result { + let calldata = call_request.data.unwrap_or_default(); + + let transaction_request = TransactionRequest { + nonce: Default::default(), + from: call_request.from, + to: call_request.to, + value: call_request.value.unwrap_or_default(), + gas_price: call_request.gas_price.unwrap_or_default(), + gas: call_request.gas.unwrap_or_default(), + input: calldata, + transaction_type: call_request.transaction_type, + access_list: call_request.access_list, + eip712_meta: call_request.eip712_meta, + ..Default::default() + }; + check_tx_data(&transaction_request, max_tx_size)?; + Ok(transaction_request) +} + +pub fn l2_tx_from_call_req( + call_request: CallRequest, + max_tx_size: usize, +) -> Result { + let tx_request: TransactionRequest = tx_req_from_call_req(call_request, max_tx_size)?; + let l2_tx = tx_request.try_into()?; + Ok(l2_tx) +} + +impl TryFrom for L1Tx { type Error = SerializationTransactionError; fn try_from(tx: CallRequest) -> Result { - let tx: TransactionRequest = tx.into(); - tx.try_into() + // L1 transactions have no limitations on the transaction size. 
+ let tx: L2Tx = l2_tx_from_call_req(tx, MAX_MEMORY_BYTES)?; + + // Note, that while the user has theoretically provided the fee for ETH on L1, + // the payment to the operator as well as refunds happen on L2 and so all the ETH + // that the transaction requires to pay the operator needs to be minted on L2. + let total_needed_eth = + tx.execute.value + tx.common_data.fee.max_fee_per_gas * tx.common_data.fee.gas_limit; + + // Note, that we do not set refund_recipient here, to keep it explicitly 0, + // so that during fee estimation it is taken into account that the refund recipient may be a different address + let common_data = L1TxCommonData { + sender: tx.common_data.initiator_address, + max_fee_per_gas: tx.common_data.fee.max_fee_per_gas, + gas_limit: tx.common_data.fee.gas_limit, + gas_per_pubdata_limit: tx.common_data.fee.gas_per_pubdata_limit, + to_mint: total_needed_eth, + ..Default::default() + }; + + let tx = L1Tx { + execute: tx.execute, + common_data, + received_timestamp_ms: 0u64, + }; + + Ok(tx) } } @@ -834,16 +905,35 @@ pub fn validate_factory_deps( Ok(()) } +fn check_tx_data( + tx_request: &TransactionRequest, + max_tx_size: usize, +) -> Result<(), SerializationTransactionError> { + let l2_tx: L2Tx = tx_request.clone().try_into()?; + // since abi_encoding_len returns 32-byte words multiplication on 32 is needed + let tx_size = l2_tx.abi_encoding_len() * 32; + if tx_size > max_tx_size { + return Err(SerializationTransactionError::OversizedData( + max_tx_size, + tx_size, + )); + }; + Ok(()) +} + #[cfg(test)] mod tests { use super::*; - use crate::web3::api::Namespace; - use crate::web3::transports::test::TestTransport; - use crate::web3::types::{TransactionParameters, H256, U256}; + use crate::web3::{ + api::Namespace, + transports::test::TestTransport, + types::{TransactionParameters, H256, U256}, + }; use secp256k1::SecretKey; #[tokio::test] async fn decode_real_tx() { + let random_tx_max_size = 1_000_000; // bytes let accounts = 
crate::web3::api::Accounts::new(TestTransport::default()); let pk = hex::decode("4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318") @@ -865,8 +955,12 @@ mod tests { access_list: None, }; let signed_tx = accounts.sign_transaction(tx.clone(), &key).await.unwrap(); - let (tx2, _) = - TransactionRequest::from_bytes(signed_tx.raw_transaction.0.as_slice(), 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes( + signed_tx.raw_transaction.0.as_slice(), + 270, + random_tx_max_size, + ) + .unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price.unwrap(), tx2.gas_price); assert_eq!(tx.nonce.unwrap(), tx2.nonce); @@ -877,6 +971,7 @@ mod tests { #[test] fn decode_rlp() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -899,7 +994,7 @@ mod tests { let mut rlp = RlpStream::new(); tx.rlp(&mut rlp, 270, Some(&signature)); let data = rlp.out().to_vec(); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, 270, random_tx_max_size).unwrap(); assert_eq!(tx.gas, tx2.gas); assert_eq!(tx.gas_price, tx2.gas_price); assert_eq!(tx.nonce, tx2.nonce); @@ -916,6 +1011,7 @@ mod tests { #[test] fn decode_eip712_with_meta() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -955,13 +1051,14 @@ mod tests { tx.r = Some(U256::from_big_endian(signature.r())); tx.s = Some(U256::from_big_endian(signature.s())); - let (tx2, _) = TransactionRequest::from_bytes(&data, 270).unwrap(); + let (tx2, _) = TransactionRequest::from_bytes(&data, 270, random_tx_max_size).unwrap(); assert_eq!(tx, tx2); } #[test] fn check_recovered_public_key_eip712() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = 
PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -991,13 +1088,15 @@ mod tests { let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); - let (decoded_tx, _) = TransactionRequest::from_bytes(encoded_tx.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(encoded_tx.as_slice(), 270, random_tx_max_size).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } #[test] fn check_recovered_public_key_eip712_with_wrong_chain_id() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1030,15 +1129,17 @@ mod tests { let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId(270)); - let decoded_tx = TransactionRequest::from_bytes(encoded_tx.as_slice(), 272); - assert!(matches!( + let decoded_tx = + TransactionRequest::from_bytes(encoded_tx.as_slice(), 272, random_tx_max_size); + assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(270))) - )); + ); } #[test] fn check_recovered_public_key_eip1559() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1070,13 +1171,15 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let (decoded_tx, _) = TransactionRequest::from_bytes(data.as_slice(), 270).unwrap(); + let (decoded_tx, _) = + TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size).unwrap(); let recovered_signer = decoded_tx.from.unwrap(); assert_eq!(address, recovered_signer); } #[test] fn check_recovered_public_key_eip1559_with_wrong_chain_id() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1107,15 +1210,16 @@ 
mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270); - assert!(matches!( + let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + assert_eq!( decoded_tx, Err(SerializationTransactionError::WrongChainId(Some(272))) - )); + ); } #[test] fn check_decode_eip1559_with_access_list() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1147,15 +1251,16 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_1559_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); - assert!(matches!( + let res = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) - )); + ); } #[test] fn check_failed_to_decode_eip2930() { + let random_tx_max_size = 1_000_000; // bytes let private_key = H256::random(); let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); @@ -1184,11 +1289,11 @@ mod tests { let mut data = rlp.out().to_vec(); data.insert(0, EIP_2930_TX_TYPE); - let res = TransactionRequest::from_bytes(data.as_slice(), 270); - assert!(matches!( + let res = TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size); + assert_eq!( res, Err(SerializationTransactionError::AccessListsNotSupported) - )); + ); } #[test] @@ -1204,16 +1309,144 @@ mod tests { assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { - nonce: U256::from((u32::MAX as u64) + 1), + nonce: U256::from(u32::MAX as u64 + 1), to: Some(Address::repeat_byte(0x1)), from: Some(Address::repeat_byte(0x1)), value: U256::zero(), ..Default::default() }; let execute_tx2: Result = tx2.try_into(); + assert_eq!( + execute_tx2.unwrap_err(), + SerializationTransactionError::TooBigNonce + ); + } + + #[test] + fn 
transaction_request_with_big_gas() { + let tx1 = TransactionRequest { + to: Some(Address::repeat_byte(0x1)), + from: Some(Address::repeat_byte(0x1)), + value: U256::zero(), + gas_price: U256::MAX, + ..Default::default() + }; + let execute_tx1: Result = tx1.try_into(); + assert_eq!( + execute_tx1.unwrap_err(), + SerializationTransactionError::TooHighGas( + "max fee per gas higher than 2^64-1".to_string() + ) + ); + + let tx2 = TransactionRequest { + to: Some(Address::repeat_byte(0x1)), + from: Some(Address::repeat_byte(0x1)), + value: U256::zero(), + max_priority_fee_per_gas: Some(U256::MAX), + ..Default::default() + }; + let execute_tx2: Result = tx2.try_into(); + assert_eq!( + execute_tx2.unwrap_err(), + SerializationTransactionError::TooHighGas( + "max priority fee per gas higher than 2^64-1".to_string() + ) + ); + + let tx3 = TransactionRequest { + to: Some(Address::repeat_byte(0x1)), + from: Some(Address::repeat_byte(0x1)), + value: U256::zero(), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: U256::MAX, + ..Default::default() + }), + ..Default::default() + }; + + let execute_tx3: Result = tx3.try_into(); + assert_eq!( + execute_tx3.unwrap_err(), + SerializationTransactionError::TooHighGas( + "max fee per pubdata byte higher than 2^64-1".to_string() + ) + ); + } + + #[test] + fn transaction_request_with_oversize_data() { + let random_tx_max_size = 1_000_000; // bytes + let private_key = H256::random(); + let address = PackedEthSignature::address_from_private_key(&private_key).unwrap(); + // choose some number that devides on 8 and is > 1_000_000 + let factory_dep = vec![2u8; 1600000]; + let factory_deps: Vec> = factory_dep.chunks(32).map(|s| s.into()).collect(); + let mut tx = TransactionRequest { + nonce: U256::from(1u32), + to: Some(Address::random()), + from: Some(address), + value: U256::from(10u32), + gas_price: U256::from(11u32), + max_priority_fee_per_gas: Some(U256::from(0u32)), + gas: U256::from(12u32), + input: Bytes::from(vec![1, 2, 3]), + 
transaction_type: Some(U64::from(EIP_712_TX_TYPE)), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: U256::from(4u32), + factory_deps: Some(factory_deps), + custom_signature: Some(vec![1, 2, 3]), + paymaster_params: Some(PaymasterParams { + paymaster: Default::default(), + paymaster_input: vec![], + }), + }), + chain_id: Some(270), + ..Default::default() + }; + + let msg = + PackedEthSignature::typed_data_to_signed_bytes(&Eip712Domain::new(L2ChainId(270)), &tx); + let signature = PackedEthSignature::sign_raw(&private_key, &msg).unwrap(); + + let mut rlp = RlpStream::new(); + tx.rlp(&mut rlp, 270, Some(&signature)); + let mut data = rlp.out().to_vec(); + data.insert(0, EIP_712_TX_TYPE); + tx.raw = Some(Bytes(data.clone())); + tx.v = Some(U64::from(signature.v())); + tx.r = Some(U256::from_big_endian(signature.r())); + tx.s = Some(U256::from_big_endian(signature.s())); assert!(matches!( - execute_tx2, - Err(SerializationTransactionError::TooBigNonce) + TransactionRequest::from_bytes(data.as_slice(), 270, random_tx_max_size), + Err(SerializationTransactionError::OversizedData(_, _)) + )) + } + + #[test] + fn check_call_req_to_l2_tx_oversize_data() { + let factory_dep = vec![2u8; 1600000]; + let random_tx_max_size = 100_000; // bytes + let call_request = CallRequest { + from: Some(Address::random()), + to: Some(Address::random()), + gas: Some(U256::from(12u32)), + gas_price: Some(U256::from(12u32)), + max_fee_per_gas: Some(U256::from(12u32)), + max_priority_fee_per_gas: Some(U256::from(12u32)), + value: Some(U256::from(12u32)), + data: Some(Bytes(factory_dep)), + transaction_type: Some(U64::from(EIP_712_TX_TYPE)), + access_list: None, + eip712_meta: None, + }; + + let try_to_l2_tx: Result = + l2_tx_from_call_req(call_request, random_tx_max_size); + + assert!(matches!( + try_to_l2_tx, + Err(SerializationTransactionError::OversizedData(_, _)) )); } } diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index 874146ff5c4d..bd8a81f458b4 100644 --- 
a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -6,6 +6,7 @@ use std::fmt::Debug; use zksync_basic_types::{Address, H256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; pub mod execute; pub mod primitives; @@ -24,6 +25,7 @@ pub struct TransactionExecutionResult { pub execution_status: TxExecutionStatus, pub refunded_gas: u32, pub operator_suggested_refund: u32, + pub compressed_bytecodes: Vec, } #[derive(Debug, Clone)] diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs index 91ac40d57554..83d9801f8f1a 100644 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ b/core/lib/types/src/tx/tx_execution_info.rs @@ -1,9 +1,8 @@ use crate::commitment::CommitmentSerializable; use crate::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; use crate::l2_to_l1_log::L2ToL1Log; -use crate::log_query_sorter::sort_storage_access_queries; use crate::writes::{InitialStorageWrite, RepeatedStorageWrite}; -use crate::{StorageLogQuery, StorageLogQueryType, VmEvent}; +use crate::{StorageLogQuery, VmEvent, PUBLISH_BYTECODE_OVERHEAD}; use std::ops::{Add, AddAssign}; use zksync_utils::bytecode::bytecode_len_in_bytes; @@ -32,10 +31,21 @@ impl TxExecutionStatus { } } -#[derive(Debug, Clone, Copy, Default, serde::Serialize, PartialEq)] -pub struct ExecutionMetrics { +#[derive(Debug, Default, Clone, Copy, PartialEq)] +pub struct DeduplicatedWritesMetrics { pub initial_storage_writes: usize, pub repeated_storage_writes: usize, +} + +impl DeduplicatedWritesMetrics { + pub fn size(&self) -> usize { + self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE + + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE + } +} + +#[derive(Debug, Clone, Copy, Default, serde::Serialize, PartialEq)] +pub struct ExecutionMetrics { pub gas_used: usize, pub published_bytecode_bytes: usize, pub l2_l1_long_messages: usize, @@ -49,14 +59,8 @@ pub struct ExecutionMetrics { } impl ExecutionMetrics 
{ - pub fn storage_writes(&self) -> usize { - self.initial_storage_writes + self.repeated_storage_writes - } - pub fn size(&self) -> usize { - self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE - + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE - + self.l2_l1_logs * L2ToL1Log::SERIALIZED_SIZE + self.l2_l1_logs * L2ToL1Log::SERIALIZED_SIZE + self.l2_l1_long_messages + self.published_bytecode_bytes } @@ -68,22 +72,23 @@ impl ExecutionMetrics { contracts_used: usize, cycles_used: u32, ) -> Self { - let (initial_storage_writes, repeated_storage_writes) = - get_initial_and_repeated_storage_writes(logs.storage_logs.as_slice()); - + // We published the data as ABI-encoded `bytes`, so the total length is: + // - message length in bytes, rounded up to a multiple of 32 + // - 32 bytes of encoded offset + // - 32 bytes of encoded length let l2_l1_long_messages = extract_long_l2_to_l1_messages(&logs.events) .iter() - .map(|event| event.len()) + .map(|event| (event.len() + 31) / 32 * 32 + 64) .sum(); let published_bytecode_bytes = extract_published_bytecodes(&logs.events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash)) + .map(|bytecodehash| { + bytecode_len_in_bytes(*bytecodehash) + PUBLISH_BYTECODE_OVERHEAD as usize + }) .sum(); ExecutionMetrics { - initial_storage_writes: initial_storage_writes as usize, - repeated_storage_writes: repeated_storage_writes as usize, gas_used, published_bytecode_bytes, l2_l1_long_messages, @@ -103,8 +108,6 @@ impl Add for ExecutionMetrics { fn add(self, other: ExecutionMetrics) -> ExecutionMetrics { ExecutionMetrics { - initial_storage_writes: self.initial_storage_writes + other.initial_storage_writes, - repeated_storage_writes: self.repeated_storage_writes + other.repeated_storage_writes, published_bytecode_bytes: self.published_bytecode_bytes + other.published_bytecode_bytes, contracts_deployed: self.contracts_deployed + other.contracts_deployed, @@ -125,22 +128,3 @@ impl AddAssign for 
ExecutionMetrics { *self = *self + other; } } - -pub fn get_initial_and_repeated_storage_writes( - storage_log_queries: &[StorageLogQuery], -) -> (u32, u32) { - let mut initial_storage_writes = 0; - let mut repeated_storage_writes = 0; - - let (_, deduped_storage_logs) = sort_storage_access_queries(storage_log_queries); - for log in &deduped_storage_logs { - match log.log_type { - StorageLogQueryType::InitialWrite => { - initial_storage_writes += 1; - } - StorageLogQueryType::RepeatedWrite => repeated_storage_writes += 1, - StorageLogQueryType::Read => {} - } - } - (initial_storage_writes, repeated_storage_writes) -} diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index 19cb669218cc..c7935d048001 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -23,6 +23,8 @@ thiserror = "1.0" futures = "0.3" hex = "0.4" envy = "0.4" +reqwest = { version = "0.11", features = ["blocking"] } +itertools = "0.10.5" [dev-dependencies] serde_json = "1.0.0" diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 01802b826b1e..05cb5619f292 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,3 +1,7 @@ +use itertools::Itertools; +use std::collections::HashMap; +use std::convert::TryInto; +use zksync_basic_types::ethabi::{encode, Token}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -5,7 +9,7 @@ use crate::bytes_to_chunks; const MAX_BYTECODE_LENGTH_IN_WORDS: usize = (1 << 16) - 1; const MAX_BYTECODE_LENGTH_BYTES: usize = MAX_BYTECODE_LENGTH_IN_WORDS * 32; -#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error, PartialEq)] pub enum InvalidBytecodeError { #[error("Bytecode too long: {0} bytes, while max {1} allowed")] BytecodeTooLong(usize, usize), @@ -15,6 +19,117 @@ pub enum InvalidBytecodeError { BytecodeLengthIsNotDivisibleBy32, } +#[derive(Debug, thiserror::Error)] +pub enum FailedToCompressBytecodeError { + #[error("Number of unique 8-bytes bytecode 
chunks exceed the limit of 2^16 - 1")] + DictionaryOverflow, + #[error("Bytecode is invalid: {0}")] + InvalidBytecode(#[from] InvalidBytecodeError), +} + +/// Implelements a simple compression algorithm for the bytecode. +pub fn compress_bytecode(code: &[u8]) -> Result, FailedToCompressBytecodeError> { + validate_bytecode(code)?; + + // Statistic is a hash map of values (number of occurences, first occurence position), + // this is needed to ensure that the determinism during sorting of the statistic, i.e. + // each element will have unique first occurence position + let mut statistic: HashMap = HashMap::new(); + let mut dictionary: HashMap = HashMap::new(); + let mut encoded_data: Vec = Vec::new(); + + // Split original bytecode into 8-byte chunks. + for (position, chunk_bytes) in code.chunks(8).enumerate() { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Count the number of occurrences of each chunk. + statistic.entry(chunk).or_insert((0, position)).0 += 1; + } + + let mut statistic_sorted_by_value: Vec<_> = statistic.into_iter().collect::>(); + statistic_sorted_by_value.sort_by_key(|x| x.1); + + // The dictionary size is limited by 2^16 - 1, + if statistic_sorted_by_value.len() > u16::MAX.into() { + return Err(FailedToCompressBytecodeError::DictionaryOverflow); + } + + // Fill the dictionary with the pmost popular chunks. + // The most popular chunks will be encoded with the smallest indexes, so that + // the 255 most popular chunks will be encoded with one zero byte. + // And the encoded data will be filled with more zeros, so + // the calldata that will be sent to L1 will be cheaper. 
+ for (chunk, _) in statistic_sorted_by_value.iter().rev() { + dictionary.insert(*chunk, dictionary.len() as u16); + } + + for chunk_bytes in code.chunks(8) { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Add the index of the chunk to the encoded data. + encoded_data.extend(dictionary.get(&chunk).unwrap().to_be_bytes()); + } + + // Prepare the raw compressed bytecode in the following format: + // - 2 bytes: the length of the dictionary (N) + // - N bytes: packed dictionary bytes + // - remaining bytes: packed encoded data bytes + + let mut compressed: Vec = Vec::new(); + compressed.extend((dictionary.len() as u16).to_be_bytes()); + + dictionary + .into_iter() + .map(|(k, v)| (v, k)) + .sorted() + .for_each(|(_, chunk)| { + compressed.extend(chunk.to_be_bytes()); + }); + + compressed.extend(encoded_data); + + Ok(compressed) +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CompressedBytecodeInfo { + pub original: Vec, + pub compressed: Vec, +} + +impl CompressedBytecodeInfo { + pub fn from_original(bytecode: Vec) -> Result { + let compressed = compress_bytecode(&bytecode)?; + + let result = Self { + original: bytecode, + compressed, + }; + + Ok(result) + } + + pub fn encode_call(&self) -> Vec { + let bytecode_hash = hash_bytecode(&self.original).as_bytes().to_vec(); + let empty_cell = vec![0u8; 32]; + + let bytes_encoded = encode(&[ + Token::Bytes(self.original.clone()), + Token::Bytes(self.compressed.clone()), + ]); + + bytecode_hash + .into_iter() + .chain(empty_cell.into_iter()) + .chain(bytes_encoded.into_iter()) + .collect() + } +} + pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { let bytecode_len = code.len(); @@ -53,3 +168,57 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) 
as usize * 32 } + +#[cfg(test)] +mod test { + use super::*; + + fn decompress_bytecode(raw_compressed_bytecode: &[u8]) -> Vec { + let mut decompressed: Vec = Vec::new(); + let mut dictionary: Vec = Vec::new(); + + let dictionary_len = u16::from_be_bytes(raw_compressed_bytecode[0..2].try_into().unwrap()); + for index in 0..dictionary_len { + let chunk = u64::from_be_bytes( + raw_compressed_bytecode[2 + index as usize * 8..10 + index as usize * 8] + .try_into() + .unwrap(), + ); + dictionary.push(chunk); + } + + let encoded_data = &raw_compressed_bytecode[2 + dictionary_len as usize * 8..]; + for index_bytes in encoded_data.chunks(2) { + let index = u16::from_be_bytes(index_bytes.try_into().unwrap()); + + let chunk = dictionary[index as usize]; + decompressed.extend(chunk.to_be_bytes()); + } + + decompressed + } + + #[test] + fn bytecode_compression_test() { + let example_code = hex::decode("000200000000000200010000000103550000006001100270000000150010019d0000000101200190000000080000c13d0000000001000019004e00160000040f0000000101000039004e00160000040f0000001504000041000000150510009c000000000104801900000040011002100000000001310019000000150320009c0000000002048019000000600220021000000000012100190000004f0001042e000000000100001900000050000104300000008002000039000000400020043f0000000002000416000000000110004c000000240000613d000000000120004c0000004d0000c13d000000200100003900000100001004430000012000000443000001000100003900000040020000390000001d03000041004e000a0000040f000000000120004c0000004d0000c13d0000000001000031000000030110008c0000004d0000a13d0000000101000367000000000101043b0000001601100197000000170110009c0000004d0000c13d0000000101000039000000000101041a0000000202000039000000000202041a000000400300043d00000040043000390000001805200197000000000600041a0000000000540435000000180110019700000020043000390000000000140435000000a0012002700000001901100197000000600430003900000000001404350000001a012001980000001b010000410000000001006019000000b8022002700000001c02200197000000000121019f000000800
2300039000000000012043500000018016001970000000000130435000000400100043d0000000002130049000000a0022000390000000003000019004e000a0000040f004e00140000040f0000004e000004320000004f0001042e000000500001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000000000008903573000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000ffffff0000000000008000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000007fffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let compressed = compress_bytecode(&example_code).unwrap(); + let decompressed = decompress_bytecode(&compressed); + + assert_eq!(example_code, decompressed); + } + + #[test] + fn bytecode_compression_statistics_test() { + let example_code = + hex::decode("0000000000000000111111111111111111111111111111112222222222222222") + .unwrap(); + // The size of the dictionary should be 0x0003 + // The dictionary itself should put the most common chunk first, i.e. 0x1111111111111111 + // Then, the ordering does not matter, but the algorithm will return the one with the highest position, i.e. 
0x2222222222222222 + let expected_encoding = + hex::decode("00031111111111111111222222222222222200000000000000000002000000000001") + .unwrap(); + + assert_eq!(expected_encoding, compress_bytecode(&example_code).unwrap()); + } +} diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs index 959f3fd2c8f3..3cd3fa71974e 100644 --- a/core/lib/utils/src/convert.rs +++ b/core/lib/utils/src/convert.rs @@ -126,6 +126,17 @@ pub fn bytes_to_be_words(vec: Vec) -> Vec { vec.chunks(32).map(U256::from_big_endian).collect() } +pub fn be_words_to_bytes(words: &[U256]) -> Vec { + words + .iter() + .flat_map(|w| { + let mut bytes = [0u8; 32]; + w.to_big_endian(&mut bytes); + bytes + }) + .collect() +} + pub fn u256_to_h256(num: U256) -> H256 { let mut bytes = [0u8; 32]; num.to_big_endian(&mut bytes); diff --git a/core/lib/utils/src/http_with_retries.rs b/core/lib/utils/src/http_with_retries.rs new file mode 100644 index 000000000000..b66ac76bfad7 --- /dev/null +++ b/core/lib/utils/src/http_with_retries.rs @@ -0,0 +1,50 @@ +use reqwest::header::HeaderMap; +use reqwest::{Client, Error, Method, Response}; +use tokio::time::{sleep, Duration}; + +/// Method to send HTTP request with fixed number of retires with exponential back-offs. 
+pub async fn send_request_with_retries( + url: &str, + max_retries: usize, + method: Method, + headers: Option, + body: Option>, +) -> Result { + let mut retries = 0usize; + let mut delay = Duration::from_secs(1); + loop { + match send_request(url, method.clone(), headers.clone(), body.clone()).await { + Ok(response) => return Ok(response), + Err(err) => { + if retries >= max_retries { + return Err(err); + } + retries += 1; + sleep(delay).await; + delay = delay.checked_mul(2).unwrap_or(Duration::MAX); + } + } + } +} + +async fn send_request( + url: &str, + method: Method, + headers: Option, + body: Option>, +) -> Result { + let client = Client::new(); + let mut request = client.request(method, url); + + if let Some(headers) = headers { + request = request.headers(headers); + } + + if let Some(body) = body { + request = request.body(body); + } + + let request = request.build()?; + let response = client.execute(request).await?; + Ok(response) +} diff --git a/core/lib/utils/src/lib.rs b/core/lib/utils/src/lib.rs index 4875d3b7ea3a..0138dca4a27d 100644 --- a/core/lib/utils/src/lib.rs +++ b/core/lib/utils/src/lib.rs @@ -3,6 +3,7 @@ pub mod bytecode; mod convert; mod env_tools; +pub mod http_with_retries; mod macros; pub mod misc; pub mod panic_extractor; @@ -10,6 +11,7 @@ pub mod panic_notify; mod serde_wrappers; pub mod test_utils; pub mod time; + pub use convert::*; pub use env_tools::*; pub use macros::*; diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 89f08a781400..ee98c6f20c53 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -172,6 +172,7 @@ fn get_sampling_ratio() -> f64 { /// If the sentry URL is provided via an environment variable, this function will also initialize sentry. /// Returns a sentry client guard. 
The full description can be found in the official documentation: /// https://docs.sentry.io/platforms/rust/#configure +#[must_use] pub fn init() -> Option { let log_format = std::env::var("MISC_LOG_FORMAT").unwrap_or_else(|_| "plain".to_string()); let service_name = diff --git a/core/lib/vm/src/bootloader_state.rs b/core/lib/vm/src/bootloader_state.rs index e6c572d6eb7f..2ecb845dfa64 100644 --- a/core/lib/vm/src/bootloader_state.rs +++ b/core/lib/vm/src/bootloader_state.rs @@ -1,3 +1,5 @@ +use crate::vm_with_bootloader::TX_DESCRIPTION_OFFSET; + /// Intermediate bootloader-related VM state. /// /// Required to process transactions one by one (since we intercept the VM execution to execute @@ -19,6 +21,9 @@ pub(crate) struct BootloaderState { tx_to_execute: usize, /// Vector that contains sizes of all pushed transactions. tx_sizes: Vec, + + /// The number of 32-byte words spent on the already included compressed bytecodes. + compressed_bytecodes_encoding: usize, } impl BootloaderState { @@ -68,6 +73,18 @@ impl BootloaderState { pub(crate) fn get_tx_size(&self, tx_index: usize) -> usize { self.tx_sizes[tx_index] } + + pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize { + TX_DESCRIPTION_OFFSET + self.tx_sizes.iter().take(tx_index).sum::() + } + + pub(crate) fn add_compressed_bytecode(&mut self, bytecode_compression_encoding_length: usize) { + self.compressed_bytecodes_encoding += bytecode_compression_encoding_length; + } + + pub(crate) fn get_compressed_bytecodes(&self) -> usize { + self.compressed_bytecodes_encoding + } } #[cfg(test)] diff --git a/core/lib/vm/src/errors/tx_revert_reason.rs b/core/lib/vm/src/errors/tx_revert_reason.rs index ec3242d7954f..9259dd87a376 100644 --- a/core/lib/vm/src/errors/tx_revert_reason.rs +++ b/core/lib/vm/src/errors/tx_revert_reason.rs @@ -9,7 +9,8 @@ use super::{BootloaderErrorCode, VmRevertReason}; pub enum TxRevertReason { // Can only be returned in EthCall execution mode (=ExecuteOnly) 
EthCall(VmRevertReason), - TxOutOfGas, + // Returned when the execution of an L2 transaction has failed + TxReverted(VmRevertReason), // Can only be returned in VerifyAndExecute ValidationFailed(VmRevertReason), PaymasterValidationFailed(VmRevertReason), @@ -160,7 +161,7 @@ impl Display for TxRevertReason { match &self { // EthCall reason is usually returned unchanged. TxRevertReason::EthCall(reason) => write!(f, "{}", reason), - TxRevertReason::TxOutOfGas => write!(f, "out of gas"), + TxRevertReason::TxReverted(reason) => write!(f, "{}", reason), TxRevertReason::ValidationFailed(reason) => { write!(f, "Account validation error: {}", reason) } diff --git a/core/lib/vm/src/event_sink.rs b/core/lib/vm/src/event_sink.rs index e850fb4e2bd4..5bfd279b22f7 100644 --- a/core/lib/vm/src/event_sink.rs +++ b/core/lib/vm/src/event_sink.rs @@ -9,7 +9,7 @@ use zk_evm::{ }, }; -use crate::history_recorder::AppDataFrameManagerWithHistory; +use crate::history_recorder::{AppDataFrameManagerWithHistory, FrameManager, WithHistory}; #[derive(Debug, Default, Clone, PartialEq)] pub struct InMemoryEventSink { @@ -119,6 +119,24 @@ impl InMemoryEventSink { (events, l1_messages) } + + pub fn get_size(&self) -> usize { + self.frames_stack + .inner() + .get_frames() + .iter() + .map(|frame| { + (frame.forward.len() + frame.rollbacks.len()) * std::mem::size_of::() + }) + .sum::() + } + + pub fn get_history_size(&self) -> usize { + self.frames_stack.history().len() + * std::mem::size_of::< + > as WithHistory>::HistoryRecord, + >() + } } impl EventSink for InMemoryEventSink { diff --git a/core/lib/vm/src/history_recorder.rs b/core/lib/vm/src/history_recorder.rs index 1f673675957f..2dabb13be173 100644 --- a/core/lib/vm/src/history_recorder.rs +++ b/core/lib/vm/src/history_recorder.rs @@ -197,6 +197,10 @@ where pub fn len(&self) -> usize { self.frame_stack.len() } + + pub fn get_frames(&self) -> &[T] { + &self.frame_stack + } } impl HistoryRecorder> { @@ -569,7 +573,6 @@ impl HistoryRecorder { } 
#[derive(Debug)] - pub struct StorageWrapper<'a> { storage_ptr: StoragePtr<'a>, } diff --git a/core/lib/vm/src/lib.rs b/core/lib/vm/src/lib.rs index fcfb5eda9814..2126a022fe80 100644 --- a/core/lib/vm/src/lib.rs +++ b/core/lib/vm/src/lib.rs @@ -5,10 +5,11 @@ mod errors; pub mod event_sink; mod events; mod history_recorder; -mod memory; +pub mod memory; mod oracle_tools; pub mod oracles; mod pubdata_utils; +mod refunds; pub mod storage; pub mod test_utils; pub mod transaction_data; diff --git a/core/lib/vm/src/memory.rs b/core/lib/vm/src/memory.rs index 88582aa14e83..105670226ed2 100644 --- a/core/lib/vm/src/memory.rs +++ b/core/lib/vm/src/memory.rs @@ -4,7 +4,9 @@ use zk_evm::vm_state::PrimitiveValue; use zk_evm::zkevm_opcode_defs::FatPointer; use zksync_types::U256; -use crate::history_recorder::{IntFrameManagerWithHistory, MemoryWithHistory}; +use crate::history_recorder::{ + FrameManager, IntFrameManagerWithHistory, MemoryWithHistory, MemoryWrapper, WithHistory, +}; use crate::oracles::OracleWithHistory; use crate::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; @@ -116,6 +118,35 @@ impl SimpleMemory { result } + + pub fn get_size(&self) -> usize { + // Hashmap memory overhead is neglected. 
+ let memory_size = self + .memory + .inner() + .memory + .iter() + .map(|page| page.len() * std::mem::size_of::<(usize, PrimitiveValue)>()) + .sum::(); + let observable_pages_size = self + .observable_pages + .inner() + .get_frames() + .iter() + .map(|frame| frame.len() * std::mem::size_of::()) + .sum::(); + + memory_size + observable_pages_size + } + + pub fn get_history_size(&self) -> usize { + let memory_size = self.memory.history().len() + * std::mem::size_of::<::HistoryRecord>(); + let observable_pages_size = self.observable_pages.history().len() + * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); + + memory_size + observable_pages_size + } } impl Memory for SimpleMemory { @@ -124,14 +155,27 @@ impl Memory for SimpleMemory { _monotonic_cycle_counter: u32, mut query: MemoryQuery, ) -> MemoryQuery { - // The following assertion works fine even when doing a read - // from heap through pointer, since `value_is_pointer` can only be set to - // `true` during memory writes. - if query.location.memory_type != MemoryType::Stack { - assert!( - !query.value_is_pointer, - "Pointers can only be stored on stack" - ); + match query.location.memory_type { + MemoryType::Stack => {} + MemoryType::Heap | MemoryType::AuxHeap => { + // The following assertion works fine even when doing a read + // from heap through pointer, since `value_is_pointer` can only be set to + // `true` during memory writes. 
+ assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::FatPointer => { + assert!(!query.rw_flag); + assert!( + !query.value_is_pointer, + "Pointers can only be stored on stack" + ); + } + MemoryType::Code => { + unreachable!("code should be through specialized query"); + } } let page = query.location.page.0 as usize; @@ -161,6 +205,7 @@ impl Memory for SimpleMemory { _monotonic_cycle_counter: u32, mut query: MemoryQuery, ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); assert!( !query.value_is_pointer, "Pointers are not used for decommmits" @@ -193,6 +238,7 @@ impl Memory for SimpleMemory { _monotonic_cycle_counter: u32, mut query: MemoryQuery, ) -> MemoryQuery { + assert_eq!(query.location.memory_type, MemoryType::Code); assert!( !query.value_is_pointer, "Pointers are not used for decommmits" @@ -237,7 +283,7 @@ impl Memory for SimpleMemory { returndata_fat_pointer: FatPointer, timestamp: Timestamp, ) { - // Safe to unwrap here, since `finish_global_frame` is never called with empty stack + // Safe to unwrap here, since `finish_global_frame` is never called with empty stack let current_observable_pages = self.observable_pages.drain_frame(timestamp); let returndata_page = returndata_fat_pointer.memory_page; diff --git a/core/lib/vm/src/oracle_tools.rs b/core/lib/vm/src/oracle_tools.rs index ea28c2491e9f..571d7d0827f3 100644 --- a/core/lib/vm/src/oracle_tools.rs +++ b/core/lib/vm/src/oracle_tools.rs @@ -8,7 +8,7 @@ use crate::event_sink::InMemoryEventSink; use crate::oracles::decommitter::DecommitterOracle; use crate::oracles::precompile::PrecompilesProcessorWithHistory; use crate::oracles::storage::StorageOracle; -use crate::storage::{Storage, StoragePtr}; +use crate::storage::Storage; use zk_evm::witness_trace::DummyTracer; #[derive(Debug)] @@ -19,7 +19,6 @@ pub struct OracleTools<'a, const B: bool> { pub precompiles_processor: PrecompilesProcessorWithHistory, pub decommittment_processor: 
DecommitterOracle<'a, B>, pub witness_tracer: DummyTracer, - pub storage_view: StoragePtr<'a>, } impl<'a> OracleTools<'a, false> { @@ -33,7 +32,6 @@ impl<'a> OracleTools<'a, false> { precompiles_processor: PrecompilesProcessorWithHistory::default(), decommittment_processor: DecommitterOracle::new(pointer.clone()), witness_tracer: DummyTracer {}, - storage_view: pointer, } } } diff --git a/core/lib/vm/src/oracles/decommitter.rs b/core/lib/vm/src/oracles/decommitter.rs index e0c795879146..5ffafabcf3f1 100644 --- a/core/lib/vm/src/oracles/decommitter.rs +++ b/core/lib/vm/src/oracles/decommitter.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use crate::history_recorder::HistoryRecorder; +use crate::history_recorder::{HistoryRecorder, WithHistory}; use crate::storage::StoragePtr; use zk_evm::abstractions::MemoryType; @@ -103,6 +103,41 @@ impl<'a, const B: bool> DecommitterOracle<'a, B> { pub fn get_storage(&self) -> StoragePtr<'a> { self.storage.clone() } + + pub fn get_size(&self) -> usize { + // Hashmap memory overhead is neglected. 
+ let known_bytecodes_size = self + .known_bytecodes + .inner() + .iter() + .map(|(_, value)| value.len() * std::mem::size_of::()) + .sum::(); + let decommitted_code_hashes_size = + self.decommitted_code_hashes.inner().len() * std::mem::size_of::<(U256, u32)>(); + + known_bytecodes_size + decommitted_code_hashes_size + } + + pub fn get_history_size(&self) -> usize { + let known_bytecodes_stack_size = self.known_bytecodes.history().len() + * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); + let known_bytecodes_heap_size = self + .known_bytecodes + .history() + .iter() + .map(|(_, event)| { + if let Some(bytecode) = event.value.as_ref() { + bytecode.len() * std::mem::size_of::() + } else { + 0 + } + }) + .sum::(); + let decommitted_code_hashes_size = self.decommitted_code_hashes.history().len() + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + + known_bytecodes_stack_size + known_bytecodes_heap_size + decommitted_code_hashes_size + } } impl<'a, const B: bool> OracleWithHistory for DecommitterOracle<'a, B> { @@ -168,7 +203,7 @@ impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { for (i, value) in values.iter().enumerate() { tmp_q.location.index = MemoryIndex(i as u32); tmp_q.value = *value; - memory.execute_partial_query(monotonic_cycle_counter, tmp_q); + memory.specialized_code_query(monotonic_cycle_counter, tmp_q); } (partial_query, Some(values)) @@ -176,7 +211,7 @@ impl<'a, const B: bool> DecommittmentProcessor for DecommitterOracle<'a, B> { for (i, value) in values.into_iter().enumerate() { tmp_q.location.index = MemoryIndex(i as u32); tmp_q.value = value; - memory.execute_partial_query(monotonic_cycle_counter, tmp_q); + memory.specialized_code_query(monotonic_cycle_counter, tmp_q); } (partial_query, None) diff --git a/core/lib/vm/src/oracles/storage.rs b/core/lib/vm/src/oracles/storage.rs index 9f0c23cfbf69..afb1c8bbff30 100644 --- a/core/lib/vm/src/oracles/storage.rs +++ b/core/lib/vm/src/oracles/storage.rs @@ -3,7 
+3,8 @@ use std::collections::HashMap; use crate::storage::StoragePtr; use crate::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper, + AppDataFrameManagerWithHistory, FrameManager, HashMapHistoryEvent, HistoryRecorder, + StorageWrapper, WithHistory, }; use zk_evm::abstractions::RefundedAmounts; @@ -13,8 +14,10 @@ use zk_evm::{ aux_structures::{LogQuery, Timestamp}, reference_impls::event_sink::ApplicationData, }; +use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, U256, + AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, + U256, }; use zksync_utils::u256_to_h256; @@ -70,6 +73,7 @@ impl<'a> StorageOracle<'a> { fn is_storage_key_free(&self, key: &StorageKey) -> bool { key.address() == &zksync_config::constants::SYSTEM_CONTEXT_ADDRESS + || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS) } pub fn read_value(&mut self, mut query: LogQuery) -> LogQuery { @@ -157,6 +161,35 @@ impl<'a> StorageOracle<'a> { base_cost - already_paid } } + + pub fn get_size(&self) -> usize { + let frames_stack_size = self + .frames_stack + .inner() + .get_frames() + .iter() + .map(|frame| { + (frame.rollbacks.len() + frame.forward.len()) + * std::mem::size_of::() + }) + .sum::(); + let paid_changes_size = + self.paid_changes.inner().len() * std::mem::size_of::<(StorageKey, u32)>(); + + frames_stack_size + paid_changes_size + } + + pub fn get_history_size(&self) -> usize { + let storage_size = self.storage.history().len() + * std::mem::size_of::<::HistoryRecord>(); + let frames_stack_size = self.frames_stack.history().len() + * std::mem::size_of::< + > as WithHistory>::HistoryRecord, + >(); + let paid_changes_size = self.paid_changes.history().len() + * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + storage_size + frames_stack_size + paid_changes_size + } } impl<'a> 
VmStorageOracle for StorageOracle<'a> { @@ -281,7 +314,6 @@ impl<'a> VmStorageOracle for StorageOracle<'a> { } fn get_pubdata_price_bytes(_query: &LogQuery, is_initial: bool) -> u32 { - // should cost less. if is_initial { zk_evm::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32 } else { diff --git a/core/lib/vm/src/oracles/tracer/bootloader.rs b/core/lib/vm/src/oracles/tracer/bootloader.rs new file mode 100644 index 000000000000..49ac06cd5258 --- /dev/null +++ b/core/lib/vm/src/oracles/tracer/bootloader.rs @@ -0,0 +1,93 @@ +use crate::memory::SimpleMemory; +use crate::oracles::tracer::{ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer}; + +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::{ErrorFlags, VmLocalState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{Opcode, RetOpcode}, +}; + +/// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. +/// Also, saves the information if this `ret` was caused by "out of gas" panic. +#[derive(Debug, Clone, Default)] +pub struct BootloaderTracer { + is_bootloader_out_of_gas: bool, + ret_from_the_bootloader: Option, +} + +impl Tracer for BootloaderTracer { + const CALL_AFTER_DECODING: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + state: VmLocalStateData<'_>, + data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + // We should check not only for the `NOT_ENOUGH_ERGS` flag but if the current frame is bootloader too. 
+ if Self::current_frame_is_bootloader(state.vm_local_state) + && data + .error_flags_accumulated + .contains(ErrorFlags::NOT_ENOUGH_ERGS) + { + self.is_bootloader_out_of_gas = true; + } + } + + fn before_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: BeforeExecutionData, + _memory: &Self::SupportedMemory, + ) { + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + _data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + // Decodes next opcode. + // `self` is passed as `tracer`, so `self.after_decoding` will be called and it will catch "out of gas". + let (next_opcode, _, _) = + zk_evm::vm_state::read_and_decode(state.vm_local_state, memory, &mut DummyTracer, self); + if Self::current_frame_is_bootloader(state.vm_local_state) { + if let Opcode::Ret(ret) = next_opcode.inner.variant.opcode { + self.ret_from_the_bootloader = Some(ret); + } + } + } +} + +impl ExecutionEndTracer for BootloaderTracer { + fn should_stop_execution(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Ok) + } +} + +impl PendingRefundTracer for BootloaderTracer {} +impl PubdataSpentTracer for BootloaderTracer {} + +impl BootloaderTracer { + fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { + // The current frame is bootloader if the callstack depth is 1. + // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior + // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. 
+ local_state.callstack.inner.len() == 1 + } + + pub fn is_bootloader_out_of_gas(&self) -> bool { + self.is_bootloader_out_of_gas + } + + pub fn bootloader_panicked(&self) -> bool { + self.ret_from_the_bootloader == Some(RetOpcode::Panic) + } +} diff --git a/core/lib/vm/src/oracles/tracer/mod.rs b/core/lib/vm/src/oracles/tracer/mod.rs new file mode 100644 index 000000000000..57464bb221e4 --- /dev/null +++ b/core/lib/vm/src/oracles/tracer/mod.rs @@ -0,0 +1,38 @@ +use crate::memory::SimpleMemory; +use zk_evm::abstractions::Tracer; +use zk_evm::vm_state::VmLocalState; + +mod bootloader; +mod one_tx; +mod transaction_result; +mod utils; +mod validation; + +pub use bootloader::BootloaderTracer; +pub use one_tx::OneTxTracer; +pub use validation::{ValidationError, ValidationTracer, ValidationTracerParams}; + +pub(crate) use transaction_result::TransactionResultTracer; + +pub trait ExecutionEndTracer: Tracer { + // Returns whether the vm execution should stop. + fn should_stop_execution(&self) -> bool; +} + +pub trait PendingRefundTracer: Tracer { + // Some(x) means that the bootloader has asked the operator to provide the refund for the + // transaction, where `x` is the refund that the bootloader has suggested on its own. + fn requested_refund(&self) -> Option { + None + } + + // Set the current request for refund as fulfilled + fn set_refund_as_done(&mut self) {} +} + +pub trait PubdataSpentTracer: Tracer { + // Returns how much gas was spent on pubdata. 
+ fn gas_spent_on_pubdata(&self, _vm_local_state: &VmLocalState) -> u32 { + 0 + } +} diff --git a/core/lib/vm/src/oracles/tracer/one_tx.rs b/core/lib/vm/src/oracles/tracer/one_tx.rs new file mode 100644 index 000000000000..003d8abf3e92 --- /dev/null +++ b/core/lib/vm/src/oracles/tracer/one_tx.rs @@ -0,0 +1,152 @@ +use super::utils::{computational_gas_price, print_debug_if_needed}; +use crate::{ + memory::SimpleMemory, + oracles::tracer::{ + utils::VmHook, BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, + PubdataSpentTracer, + }, + vm::get_vm_hook_params, +}; + +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::VmLocalState, + zkevm_opcode_defs::{LogOpcode, Opcode}, +}; +use zksync_config::constants::{KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS}; + +/// Allows any opcodes, but tells the VM to end the execution once the tx is over. +#[derive(Debug, Clone)] +pub struct OneTxTracer { + tx_has_been_processed: bool, + + // Some(x) means that the bootloader has asked the operator + // to provide the refund the user, where `x` is the refund proposed + // by the bootloader itself. 
+ pending_operator_refund: Option, + + pub refund_gas: u32, + pub gas_spent_on_bytecodes_and_long_messages: u32, + + computational_gas_used: u32, + computational_gas_limit: u32, + in_account_validation: bool, + + bootloader_tracer: BootloaderTracer, +} + +impl Tracer for OneTxTracer { + const CALL_BEFORE_EXECUTION: bool = true; + const CALL_AFTER_EXECUTION: bool = true; + type SupportedMemory = SimpleMemory; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + if self.in_account_validation { + self.computational_gas_used = self + .computational_gas_used + .saturating_add(computational_gas_price(state, &data)); + } + + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + match hook { + VmHook::TxHasEnded => self.tx_has_been_processed = true, + VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(), + VmHook::AskOperatorForRefund => { + self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32()) + } + VmHook::NoValidationEntered => self.in_account_validation = false, + VmHook::AccountValidationEntered => self.in_account_validation = true, + _ => {} + } + + if data.opcode.variant.opcode == Opcode::Log(LogOpcode::PrecompileCall) { + let current_stack = state.vm_local_state.callstack.get_current_stack(); + // Trace for precompile calls from `KNOWN_CODES_STORAGE_ADDRESS` and `L1_MESSENGER_ADDRESS` that burn some gas. + // Note, that if there is less gas left than requested to burn it will be burnt anyway. 
+ if current_stack.this_address == KNOWN_CODES_STORAGE_ADDRESS + || current_stack.this_address == L1_MESSENGER_ADDRESS + { + self.gas_spent_on_bytecodes_and_long_messages += + std::cmp::min(data.src1_value.value.as_u32(), current_stack.ergs_remaining); + } + } + } + + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &Self::SupportedMemory, + ) { + self.bootloader_tracer.after_execution(state, data, memory) + } +} + +impl ExecutionEndTracer for OneTxTracer { + fn should_stop_execution(&self) -> bool { + self.tx_has_been_processed + || self.bootloader_tracer.should_stop_execution() + || self.validation_run_out_of_gas() + } +} + +impl PendingRefundTracer for OneTxTracer { + fn requested_refund(&self) -> Option { + self.pending_operator_refund + } + + fn set_refund_as_done(&mut self) { + self.pending_operator_refund = None; + } +} + +impl PubdataSpentTracer for OneTxTracer { + fn gas_spent_on_pubdata(&self, vm_local_state: &VmLocalState) -> u32 { + self.gas_spent_on_bytecodes_and_long_messages + vm_local_state.spent_pubdata_counter + } +} + +impl OneTxTracer { + pub fn new(computational_gas_limit: u32) -> Self { + Self { + tx_has_been_processed: false, + pending_operator_refund: None, + refund_gas: 0, + gas_spent_on_bytecodes_and_long_messages: 0, + computational_gas_used: 0, + computational_gas_limit, + in_account_validation: false, + bootloader_tracer: BootloaderTracer::default(), + } + } + + pub fn is_bootloader_out_of_gas(&self) -> bool { + self.bootloader_tracer.is_bootloader_out_of_gas() + } + + pub fn tx_has_been_processed(&self) -> bool { + self.tx_has_been_processed + } + + pub fn validation_run_out_of_gas(&self) -> bool { + self.computational_gas_used > self.computational_gas_limit + } +} diff --git a/core/lib/vm/src/oracles/tracer/transaction_result.rs b/core/lib/vm/src/oracles/tracer/transaction_result.rs new file mode 100644 index 000000000000..d68d8b3e31d4 --- /dev/null +++ 
b/core/lib/vm/src/oracles/tracer/transaction_result.rs @@ -0,0 +1,76 @@ +use zk_evm::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + witness_trace::VmWitnessTracer, + zkevm_opcode_defs::decoding::VmEncodingMode, + zkevm_opcode_defs::FatPointer, +}; +use zksync_types::U256; + +use crate::memory::SimpleMemory; +use crate::oracles::tracer::utils::{print_debug_if_needed, read_pointer, VmHook}; +use crate::oracles::tracer::{ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer}; +use crate::vm::get_vm_hook_params; + +#[derive(Debug, Clone, Default)] +pub(crate) struct TransactionResultTracer { + pub(crate) revert_reason: Option>, +} + +impl> VmWitnessTracer for TransactionResultTracer {} + +impl Tracer for TransactionResultTracer { + type SupportedMemory = SimpleMemory; + const CALL_BEFORE_EXECUTION: bool = true; + + fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} + fn after_decoding( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterDecodingData, + _memory: &Self::SupportedMemory, + ) { + } + fn before_execution( + &mut self, + state: VmLocalStateData<'_>, + data: BeforeExecutionData, + memory: &Self::SupportedMemory, + ) { + let hook = VmHook::from_opcode_memory(&state, &data); + print_debug_if_needed(&hook, &state, memory); + + if matches!(hook, VmHook::ExecutionResult) { + let vm_hook_params = get_vm_hook_params(memory); + + let success = vm_hook_params[0]; + let returndata_ptr = FatPointer::from_u256(vm_hook_params[1]); + let returndata = read_pointer(memory, returndata_ptr); + + if success == U256::zero() { + self.revert_reason = Some(returndata); + } else { + self.revert_reason = None; + } + } + } + fn after_execution( + &mut self, + _state: VmLocalStateData<'_>, + _data: AfterExecutionData, + _memory: &Self::SupportedMemory, + ) { + } +} + +impl ExecutionEndTracer for TransactionResultTracer { + fn should_stop_execution(&self) -> bool 
{ + // This tracer will not prevent the execution from going forward + // until the end of the block. + false + } +} + +impl PendingRefundTracer for TransactionResultTracer {} +impl PubdataSpentTracer for TransactionResultTracer {} diff --git a/core/lib/vm/src/oracles/tracer/utils.rs b/core/lib/vm/src/oracles/tracer/utils.rs new file mode 100644 index 000000000000..f68df0d09e68 --- /dev/null +++ b/core/lib/vm/src/oracles/tracer/utils.rs @@ -0,0 +1,165 @@ +use crate::memory::SimpleMemory; +use crate::utils::heap_page_from_base; +use crate::vm::{get_vm_hook_params, VM_HOOK_POSITION}; +use crate::vm_with_bootloader::BOOTLOADER_HEAP_PAGE; + +use zk_evm::{ + abstractions::{BeforeExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, +}; +use zksync_config::constants::{ + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, +}; +use zksync_types::U256; +use zksync_utils::u256_to_h256; + +#[derive(Clone, Debug, Copy)] +pub(crate) enum VmHook { + AccountValidationEntered, + PaymasterValidationEntered, + NoValidationEntered, + ValidationStepEndeded, + TxHasEnded, + DebugLog, + DebugReturnData, + NoHook, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + ExecutionResult, +} + +impl VmHook { + pub fn from_opcode_memory(state: &VmLocalStateData<'_>, data: &BeforeExecutionData) -> Self { + let opcode_variant = data.opcode.variant; + let heap_page = + heap_page_from_base(state.vm_local_state.callstack.current.base_memory_page).0; + + let src0_value = data.src0_value.value; + + let fat_ptr = FatPointer::from_u256(src0_value); + + let value = data.src1_value.value; + + // Only UMA opcodes in the bootloader serve for vm hooks + if !matches!(opcode_variant.opcode, Opcode::UMA(UMAOpcode::HeapWrite)) + || heap_page != BOOTLOADER_HEAP_PAGE + || fat_ptr.offset != VM_HOOK_POSITION * 32 + { + return Self::NoHook; + } + + match value.as_u32() { + 0 => Self::AccountValidationEntered, + 1 => 
Self::PaymasterValidationEntered, + 2 => Self::NoValidationEntered, + 3 => Self::ValidationStepEndeded, + 4 => Self::TxHasEnded, + 5 => Self::DebugLog, + 6 => Self::DebugReturnData, + 7 => Self::NearCallCatch, + 8 => Self::AskOperatorForRefund, + 9 => Self::NotifyAboutRefund, + 10 => Self::ExecutionResult, + _ => panic!("Unkown hook"), + } + } +} + +pub(crate) fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String { + let vm_hook_params: Vec<_> = get_vm_hook_params(memory) + .into_iter() + .map(u256_to_h256) + .collect(); + let msg = vm_hook_params[0].as_bytes().to_vec(); + let data = vm_hook_params[1].as_bytes().to_vec(); + + let msg = String::from_utf8(msg).expect("Invalid debug message"); + let data = U256::from_big_endian(&data); + + // For long data, it is better to use hex-encoding for greater readibility + let data_str = if data > U256::from(u64::max_value()) { + let mut bytes = [0u8; 32]; + data.to_big_endian(&mut bytes); + format!("0x{}", hex::encode(bytes)) + } else { + data.to_string() + }; + + let tx_id = state.vm_local_state.tx_number_in_block; + + format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) +} + +/// Reads the memory slice represented by the fat pointer. +/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec { + let FatPointer { + offset, + length, + start, + memory_page, + } = pointer; + + // The actual bounds of the returndata ptr is [start+offset..start+length] + let mem_region_start = start + offset; + let mem_region_length = length - offset; + + memory.read_unaligned_bytes( + memory_page as usize, + mem_region_start as usize, + mem_region_length as usize, + ) +} + +/// Outputs the returndata for the latest call. +/// This is usually used to output the revert reason. 
+pub(crate) fn get_debug_returndata(memory: &SimpleMemory) -> String { + let vm_hook_params: Vec<_> = get_vm_hook_params(memory); + let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]); + let returndata = read_pointer(memory, returndata_ptr); + + format!("0x{}", hex::encode(returndata)) +} + +/// Accepts a vm hook and, if it requires to output some debug log, outputs it. +pub(crate) fn print_debug_if_needed( + hook: &VmHook, + state: &VmLocalStateData<'_>, + memory: &SimpleMemory, +) { + let log = match hook { + VmHook::DebugLog => get_debug_log(state, memory), + VmHook::DebugReturnData => get_debug_returndata(memory), + _ => return, + }; + + vlog::trace!("{}", log); +} + +pub(crate) fn computational_gas_price( + state: VmLocalStateData<'_>, + data: &BeforeExecutionData, +) -> u32 { + // We calculate computational gas used as a raw price for opcode plus cost for precompiles. + // This calculation is incomplete as it misses decommitment and memory growth costs. + // To calculate decommitment cost we need an access to decommitter oracle which is missing in tracer now. + // Memory growth calculation is complex and it will require different logic for different opcodes (`FarCall`, `Ret`, `UMA`). 
+ let base_price = data.opcode.inner.variant.ergs_price(); + let precompile_price = match data.opcode.variant.opcode { + Opcode::Log(LogOpcode::PrecompileCall) => { + let address = state.vm_local_state.callstack.current.this_address; + + if address == KECCAK256_PRECOMPILE_ADDRESS + || address == SHA256_PRECOMPILE_ADDRESS + || address == ECRECOVER_PRECOMPILE_ADDRESS + { + data.src1_value.value.low_u32() + } else { + 0 + } + } + _ => 0, + }; + base_price + precompile_price +} diff --git a/core/lib/vm/src/oracles/tracer.rs b/core/lib/vm/src/oracles/tracer/validation.rs similarity index 59% rename from core/lib/vm/src/oracles/tracer.rs rename to core/lib/vm/src/oracles/tracer/validation.rs index 4fe7c798c643..215b1db8ebaf 100644 --- a/core/lib/vm/src/oracles/tracer.rs +++ b/core/lib/vm/src/oracles/tracer/validation.rs @@ -1,101 +1,40 @@ -use std::{ - collections::HashSet, - fmt::{self, Display}, -}; +use std::collections::HashSet; +use std::fmt; +use std::fmt::Display; use crate::{ errors::VmRevertReasonParsingResult, memory::SimpleMemory, - storage::StoragePtr, + oracles::tracer::{ + utils::{computational_gas_price, print_debug_if_needed, VmHook}, + ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + }, utils::{aux_heap_page_from_base, heap_page_from_base}, - vm::{get_vm_hook_params, VM_HOOK_POSITION}, - vm_with_bootloader::BOOTLOADER_HEAP_PAGE, }; -// use zk_evm::testing::memory::SimpleMemory; + use zk_evm::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, aux_structures::MemoryPage, - vm_state::{ErrorFlags, VmLocalState}, - witness_trace::{DummyTracer, VmWitnessTracer}, - zkevm_opcode_defs::{ - decoding::VmEncodingMode, ContextOpcode, FarCallABI, FarCallForwardPageType, FatPointer, - LogOpcode, Opcode, RetOpcode, UMAOpcode, - }, + zkevm_opcode_defs::{ContextOpcode, FarCallABI, FarCallForwardPageType, LogOpcode, Opcode}, }; -use zksync_types::{ - get_code_key, web3::signing::keccak256, AccountTreeId, 
Address, StorageKey, - ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, H256, + +use crate::storage::StoragePtr; +use zksync_config::constants::{ + ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, - SYSTEM_CONTEXT_ADDRESS, U256, + SYSTEM_CONTEXT_ADDRESS, +}; +use zksync_types::{ + get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; use zksync_utils::{ be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, }; -pub trait ExecutionEndTracer: Tracer { - // Returns whether the vm execution should stop. - fn should_stop_execution(&self) -> bool; -} - -pub trait PendingRefundTracer: Tracer { - // Some(x) means that the bootloader has asked the operator to provide the refund for the - // transaction, where `x` is the refund that the bootloader has suggested on its own. - fn requested_refund(&self) -> Option { - None - } - - // Set the current request for refund as fulfilled - fn set_refund_as_done(&mut self) {} -} - -#[derive(Debug, Clone)] -pub struct NoopMemoryTracer; - -impl> VmWitnessTracer for NoopMemoryTracer {} - -impl Tracer for NoopMemoryTracer { - type SupportedMemory = SimpleMemory; - const CALL_BEFORE_EXECUTION: bool = true; - - fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} - fn after_decoding( - &mut self, - _state: VmLocalStateData<'_>, - _data: AfterDecodingData, - _memory: &Self::SupportedMemory, - ) { - } - fn before_execution( - &mut self, - state: VmLocalStateData<'_>, - data: BeforeExecutionData, - memory: &Self::SupportedMemory, - ) { - let hook = VmHook::from_opcode_memory(&state, &data); - print_debug_if_needed(&hook, &state, memory); - } - fn after_execution( - &mut self, - _state: VmLocalStateData<'_>, - _data: AfterExecutionData, - _memory: &Self::SupportedMemory, - ) { - } -} - -impl 
ExecutionEndTracer for NoopMemoryTracer { - fn should_stop_execution(&self) -> bool { - // This tracer will not prevent the execution from going forward - // until the end of the block. - false - } -} - -impl PendingRefundTracer for NoopMemoryTracer {} - #[derive(Debug, Clone, Eq, PartialEq, Copy)] +#[allow(clippy::enum_variant_names)] pub enum ValidationTracerMode { // Should be activated when the transaction is being validated by user. UserTxValidation, @@ -110,6 +49,7 @@ pub enum ViolatedValidationRule { TouchedUnallowedStorageSlots(Address, U256), CalledContractWithNoCode(Address), TouchedUnallowedContext, + TookTooManyComputationalGas(u32), } pub enum ValidationError { @@ -132,6 +72,13 @@ impl Display for ViolatedValidationRule { ViolatedValidationRule::TouchedUnallowedContext => { write!(f, "Touched unallowed context") } + ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { + write!( + f, + "Took too many computational gas, allowed limit: {}", + gas_limit + ) + } } } } @@ -196,6 +143,8 @@ pub struct ValidationTracer<'a> { trusted_slots: HashSet<(Address, U256)>, trusted_addresses: HashSet
, trusted_address_slots: HashSet<(Address, U256)>, + computational_gas_used: u32, + computational_gas_limit: u32, } impl fmt::Debug for ValidationTracer<'_> { @@ -227,6 +176,8 @@ pub struct ValidationTracerParams { /// They are needed to work correctly with beacon proxy, where the address of the implementation is /// stored in the beacon. pub trusted_address_slots: HashSet<(Address, U256)>, + /// Number of computational gas that validation step is allowed to use. + pub computational_gas_limit: u32, } #[derive(Debug, Clone, Default)] @@ -251,6 +202,8 @@ impl<'a> ValidationTracer<'a> { trusted_slots: params.trusted_slots, trusted_addresses: params.trusted_addresses, trusted_address_slots: params.trusted_address_slots, + computational_gas_used: 0, + computational_gas_limit: params.computational_gas_limit, } } @@ -354,6 +307,12 @@ impl<'a> ValidationTracer<'a> { data: BeforeExecutionData, memory: &SimpleMemory, ) -> ValidationRoundResult { + if self.computational_gas_used > self.computational_gas_limit { + return Err(ViolatedValidationRule::TookTooManyComputationalGas( + self.computational_gas_limit, + )); + } + let opcode_variant = data.opcode.variant; match opcode_variant.opcode { Opcode::FarCall(_) => { @@ -403,6 +362,7 @@ impl<'a> ValidationTracer<'a> { return Err(ViolatedValidationRule::TouchedUnallowedContext); } ContextOpcode::ErgsLeft => { + // T } _ => {} } @@ -458,6 +418,10 @@ impl Tracer for ValidationTracer<'_> { ) { // For now, we support only validations for users. 
if let ValidationTracerMode::UserTxValidation = self.validation_mode { + self.computational_gas_used = self + .computational_gas_used + .saturating_add(computational_gas_price(state, &data)); + let validation_round_result = self.check_user_restrictions(state, data, memory); self.process_validation_round_result(validation_round_result); } @@ -520,289 +484,4 @@ impl ExecutionEndTracer for ValidationTracer<'_> { } impl PendingRefundTracer for ValidationTracer<'_> {} - -/// Allows any opcodes, but tells the VM to end the execution once the tx is over. -#[derive(Debug, Clone, Default)] -pub struct OneTxTracer { - tx_has_been_processed: bool, - - // Some(x) means that the bootloader has asked the operator - // to provide the refund the user, where `x` is the refund proposed - // by the bootloader itself. - pending_operator_refund: Option, - - pub operator_suggested_refund_gas: u32, - - pub refund_gas: u32, - bootloader_tracer: BootloaderTracer, -} - -impl Tracer for OneTxTracer { - const CALL_BEFORE_EXECUTION: bool = true; - const CALL_AFTER_EXECUTION: bool = true; - type SupportedMemory = SimpleMemory; - - fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} - fn after_decoding( - &mut self, - _state: VmLocalStateData<'_>, - _data: AfterDecodingData, - _memory: &Self::SupportedMemory, - ) { - } - - fn before_execution( - &mut self, - state: VmLocalStateData<'_>, - data: BeforeExecutionData, - memory: &Self::SupportedMemory, - ) { - let hook = VmHook::from_opcode_memory(&state, &data); - print_debug_if_needed(&hook, &state, memory); - - match hook { - VmHook::TxHasEnded => self.tx_has_been_processed = true, - VmHook::NotifyAboutRefund => self.refund_gas = get_vm_hook_params(memory)[0].as_u32(), - VmHook::AskOperatorForRefund => { - self.pending_operator_refund = Some(get_vm_hook_params(memory)[0].as_u32()) - } - _ => {} - } - } - - fn after_execution( - &mut self, - state: VmLocalStateData<'_>, - data: AfterExecutionData, - 
memory: &Self::SupportedMemory, - ) { - self.bootloader_tracer.after_execution(state, data, memory) - } -} - -impl ExecutionEndTracer for OneTxTracer { - fn should_stop_execution(&self) -> bool { - self.tx_has_been_processed || self.bootloader_tracer.should_stop_execution() - } -} - -impl PendingRefundTracer for OneTxTracer { - fn requested_refund(&self) -> Option { - self.pending_operator_refund - } - - fn set_refund_as_done(&mut self) { - self.pending_operator_refund = None; - } -} - -impl OneTxTracer { - pub fn is_bootloader_out_of_gas(&self) -> bool { - self.bootloader_tracer.is_bootloader_out_of_gas() - } - - pub fn tx_has_been_processed(&self) -> bool { - self.tx_has_been_processed - } -} - -/// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. -/// Also, saves the information if this `ret` was caused by "out of gas" panic. -#[derive(Debug, Clone, Default)] -pub struct BootloaderTracer { - is_bootloader_out_of_gas: bool, - ret_from_the_bootloader: Option, -} - -impl Tracer for BootloaderTracer { - const CALL_AFTER_DECODING: bool = true; - const CALL_AFTER_EXECUTION: bool = true; - type SupportedMemory = SimpleMemory; - - fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} - fn after_decoding( - &mut self, - state: VmLocalStateData<'_>, - data: AfterDecodingData, - _memory: &Self::SupportedMemory, - ) { - // We should check not only for the `NOT_ENOUGH_ERGS` flag but if the current frame is bootloader too. 
- if Self::current_frame_is_bootloader(state.vm_local_state) - && data - .error_flags_accumulated - .contains(ErrorFlags::NOT_ENOUGH_ERGS) - { - self.is_bootloader_out_of_gas = true; - } - } - - fn before_execution( - &mut self, - _state: VmLocalStateData<'_>, - _data: BeforeExecutionData, - _memory: &Self::SupportedMemory, - ) { - } - - fn after_execution( - &mut self, - state: VmLocalStateData<'_>, - _data: AfterExecutionData, - memory: &Self::SupportedMemory, - ) { - // Decodes next opcode. - // `self` is passed as `tracer`, so `self.after_decoding` will be called and it will catch "out of gas". - let (next_opcode, _, _) = - zk_evm::vm_state::read_and_decode(state.vm_local_state, memory, &mut DummyTracer, self); - if Self::current_frame_is_bootloader(state.vm_local_state) { - if let Opcode::Ret(ret) = next_opcode.inner.variant.opcode { - self.ret_from_the_bootloader = Some(ret); - } - } - } -} - -impl ExecutionEndTracer for BootloaderTracer { - fn should_stop_execution(&self) -> bool { - self.ret_from_the_bootloader == Some(RetOpcode::Ok) - } -} - -impl PendingRefundTracer for BootloaderTracer {} - -impl BootloaderTracer { - fn current_frame_is_bootloader(local_state: &VmLocalState) -> bool { - // The current frame is bootloader if the callstack depth is 1. - // Some of the near calls inside the bootloader can be out of gas, which is totally normal behavior - // and it shouldn't result in `is_bootloader_out_of_gas` becoming true. 
- local_state.callstack.inner.len() == 1 - } - - pub fn is_bootloader_out_of_gas(&self) -> bool { - self.is_bootloader_out_of_gas - } - - pub fn bootloader_panicked(&self) -> bool { - self.ret_from_the_bootloader == Some(RetOpcode::Panic) - } -} - -#[derive(Clone, Debug, Copy)] -pub(crate) enum VmHook { - AccountValidationEntered, - PaymasterValidationEntered, - NoValidationEntered, - ValidationStepEndeded, - TxHasEnded, - DebugLog, - DebugReturnData, - NoHook, - NearCallCatch, - AskOperatorForRefund, - NotifyAboutRefund, - ExecutionResult, -} - -impl VmHook { - pub fn from_opcode_memory(state: &VmLocalStateData<'_>, data: &BeforeExecutionData) -> Self { - let opcode_variant = data.opcode.variant; - let heap_page = - heap_page_from_base(state.vm_local_state.callstack.current.base_memory_page).0; - - let src0_value = data.src0_value.value; - - let fat_ptr = FatPointer::from_u256(src0_value); - - let value = data.src1_value.value; - - // Only UMA opcodes in the bootloader serve for vm hooks - if !matches!(opcode_variant.opcode, Opcode::UMA(UMAOpcode::HeapWrite)) - || heap_page != BOOTLOADER_HEAP_PAGE - || fat_ptr.offset != VM_HOOK_POSITION * 32 - { - return Self::NoHook; - } - - match value.as_u32() { - 0 => Self::AccountValidationEntered, - 1 => Self::PaymasterValidationEntered, - 2 => Self::NoValidationEntered, - 3 => Self::ValidationStepEndeded, - 4 => Self::TxHasEnded, - 5 => Self::DebugLog, - 6 => Self::DebugReturnData, - 7 => Self::NearCallCatch, - 8 => Self::AskOperatorForRefund, - 9 => Self::NotifyAboutRefund, - 10 => Self::ExecutionResult, - _ => panic!("Unkown hook"), - } - } -} - -fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String { - let vm_hook_params: Vec<_> = get_vm_hook_params(memory) - .into_iter() - .map(u256_to_h256) - .collect(); - let msg = vm_hook_params[0].as_bytes().to_vec(); - let data = vm_hook_params[1].as_bytes().to_vec(); - - let msg = String::from_utf8(msg).expect("Invalid debug message"); - let data = 
U256::from_big_endian(&data); - - // For long data, it is better to use hex-encoding for greater readibility - let data_str = if data > U256::from(u64::max_value()) { - let mut bytes = [0u8; 32]; - data.to_big_endian(&mut bytes); - format!("0x{}", hex::encode(bytes)) - } else { - data.to_string() - }; - - let tx_id = state.vm_local_state.tx_number_in_block; - - format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) -} - -/// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). -pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec { - let FatPointer { - offset, - length, - start, - memory_page, - } = pointer; - - // The actual bounds of the returndata ptr is [start+offset..start+length] - let mem_region_start = start + offset; - let mem_region_length = length - offset; - - memory.read_unaligned_bytes( - memory_page as usize, - mem_region_start as usize, - mem_region_length as usize, - ) -} - -/// Outputs the returndata for the latest call. -/// This is usually used to output the revert reason. -fn get_debug_returndata(memory: &SimpleMemory) -> String { - let vm_hook_params: Vec<_> = get_vm_hook_params(memory); - let returndata_ptr = FatPointer::from_u256(vm_hook_params[0]); - let returndata = read_pointer(memory, returndata_ptr); - - format!("0x{}", hex::encode(returndata)) -} - -/// Accepts a vm hook and, if it requires to output some debug log, outputs it. 
-fn print_debug_if_needed(hook: &VmHook, state: &VmLocalStateData<'_>, memory: &SimpleMemory) { - let log = match hook { - VmHook::DebugLog => get_debug_log(state, memory), - VmHook::DebugReturnData => get_debug_returndata(memory), - _ => return, - }; - - vlog::trace!("{}", log); -} +impl PubdataSpentTracer for ValidationTracer<'_> {} diff --git a/core/lib/vm/src/pubdata_utils.rs b/core/lib/vm/src/pubdata_utils.rs index 6051c03686f6..a384d50cd4f3 100644 --- a/core/lib/vm/src/pubdata_utils.rs +++ b/core/lib/vm/src/pubdata_utils.rs @@ -4,16 +4,22 @@ use crate::VmInstance; use std::collections::HashMap; use zk_evm::aux_structures::Timestamp; use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::StorageKey; +use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; +use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; use zksync_utils::bytecode::bytecode_len_in_bytes; impl<'a> VmInstance<'a> { - pub fn pubdata_used(&self, from_timestamp: Timestamp) -> u32 { - let storage_writes_pubdata_used = self.pubdata_used_for_writes(from_timestamp); + pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 { + let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); let (events, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); - let l2_l1_logs_bytes = (l2_to_l1_logs.len() as u32) + // For the first transaction in L1 batch there may be (it depends on the execution mode) an L2->L1 log + // that is sent by `SystemContext` in `setNewBlock`. It's a part of the L1 batch pubdata overhead and not the transaction itself. 
+ let l2_l1_logs_bytes = (l2_to_l1_logs + .iter() + .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) + .count() as u32) * zk_evm::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) .iter() @@ -22,16 +28,18 @@ impl<'a> VmInstance<'a> { let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32) + .map(|bytecodehash| { + bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD + }) .sum(); - storage_writes_pubdata_used + storage_writes_pubdata_published + l2_l1_logs_bytes + l2_l1_long_messages_bytes + published_bytecode_bytes } - fn pubdata_used_for_writes(&self, from_timestamp: Timestamp) -> u32 { + fn pubdata_published_for_writes(&self, from_timestamp: Timestamp) -> u32 { // This `HashMap` contains how much was already paid for every slot that was paid during the last tx execution. // For the slots that weren't paid during the last tx execution we can just use // `self.state.storage.paid_changes.inner().get(&key)` to get how much it was paid before. 
@@ -70,15 +78,15 @@ impl<'a> VmInstance<'a> { from_timestamp, ); let (_, deduplicated_logs) = - zksync_types::log_query_sorter::sort_storage_access_queries(&storage_logs); + sort_storage_access_queries(storage_logs.iter().map(|log| &log.log_query)); deduplicated_logs .into_iter() .filter_map(|log| { - if log.log_query.rw_flag { - let key = storage_key_of_log(&log.log_query); + if log.rw_flag { + let key = storage_key_of_log(&log); let pre_paid = pre_paid_before_tx(&key); - let to_pay_by_user = self.state.storage.base_price_for_write(&log.log_query); + let to_pay_by_user = self.state.storage.base_price_for_write(&log); if to_pay_by_user > pre_paid { Some(to_pay_by_user - pre_paid) diff --git a/core/lib/vm/src/refunds.rs b/core/lib/vm/src/refunds.rs new file mode 100644 index 000000000000..9d2eab776d41 --- /dev/null +++ b/core/lib/vm/src/refunds.rs @@ -0,0 +1,198 @@ +use crate::vm_with_bootloader::{ + eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, +}; +use crate::VmInstance; +use zk_evm::aux_structures::Timestamp; +use zksync_types::U256; +use zksync_utils::ceil_div_u256; + +impl<'a> VmInstance<'a> { + pub(crate) fn tx_body_refund( + &self, + from_timestamp: Timestamp, + bootloader_refund: u32, + gas_spent_on_pubdata: u32, + ) -> u32 { + let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + let tx_gas_limit = self.get_tx_gas_limit(current_tx_index); + let total_gas_spent = tx_gas_limit - bootloader_refund; + + let gas_spent_on_computation = total_gas_spent + .checked_sub(gas_spent_on_pubdata) + .unwrap_or_else(|| { + vlog::error!( + "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", + gas_spent_on_pubdata, + total_gas_spent + ); + 0 + }); + + let pubdata_published = self.pubdata_published(from_timestamp); + + // For now, bootloader charges only for base fee. 
+ let effective_gas_price = self.block_context.base_fee; + + let bootloader_eth_price_per_pubdata_byte = U256::from(effective_gas_price) + * U256::from(self.state.local_state.current_ergs_per_pubdata_byte); + let fair_eth_price_per_pubdata_byte = U256::from(eth_price_per_pubdata_byte( + self.block_context.context.l1_gas_price, + )); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(self.block_context.context.fair_l2_gas_price) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + vlog::error!( + "Fair fee is greater than pre paid. Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u32() + } + + /// Calculates the refund for the block overhead. + /// This refund is the difference between how much user paid in advance for the block overhead + /// and how much he should pay based on actual tx execution result. + pub(crate) fn block_overhead_refund( + &self, + _from_timestamp: Timestamp, + _gas_remaining_before: u32, + _gas_spent_on_pubdata: u32, + ) -> u32 { + 0 + + // let pubdata_published = self.pubdata_published(from_timestamp); + // + // let total_gas_spent = gas_remaining_before - self.gas_remaining(); + // let gas_spent_on_computation = total_gas_spent.checked_sub(gas_spent_on_pubdata).unwrap_or_else(|| { + // vlog::error!("Gas spent on pubdata is greater than total gas spent. 
On pubdata: {}, total: {}", gas_spent_on_pubdata, total_gas_spent); + // 0 + // }); + // let (_, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); + // let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + // + // let actual_overhead = Self::actual_overhead_gas( + // self.state.local_state.current_ergs_per_pubdata_byte, + // self.bootloader_state.get_tx_size(current_tx_index), + // pubdata_published, + // gas_spent_on_computation, + // self.state + // .decommittment_processor + // .get_number_of_decommitment_requests_after_timestamp(from_timestamp), + // l2_to_l1_logs.len(), + // ); + // + // let predefined_overhead = self + // .state + // .memory + // .read_slot( + // BOOTLOADER_HEAP_PAGE as usize, + // TX_OVERHEAD_OFFSET + current_tx_index, + // ) + // .value + // .as_u32(); + // + // if actual_overhead <= predefined_overhead { + // predefined_overhead - actual_overhead + // } else { + // // This should never happen but potential mistakes at the early stage should not bring the server down. 
+ // vlog::error!( + // "Actual overhead is greater than predefined one, actual: {}, predefined: {}", + // actual_overhead, + // predefined_overhead + // ); + // 0 + // } + } + + #[allow(dead_code)] + fn actual_overhead_gas( + _gas_per_pubdata_byte_limit: u32, + _encoded_len: usize, + _pubdata_published: u32, + _gas_spent_on_computation: u32, + _number_of_decommitment_requests: usize, + _l2_l1_logs: usize, + ) -> u32 { + 0 + + // let overhead_for_block_gas = U256::from(crate::transaction_data::block_overhead_gas( + // gas_per_pubdata_byte_limit, + // )); + + // let encoded_len = U256::from(encoded_len); + // let pubdata_published = U256::from(pubdata_published); + // let gas_spent_on_computation = U256::from(gas_spent_on_computation); + // let number_of_decommitment_requests = U256::from(number_of_decommitment_requests); + // let l2_l1_logs = U256::from(l2_l1_logs); + + // let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); + + // let overhead_for_length = ceil_div_u256( + // encoded_len * overhead_for_block_gas, + // BOOTLOADER_TX_ENCODING_SPACE.into(), + // ); + + // let actual_overhead_for_pubdata = ceil_div_u256( + // pubdata_published * overhead_for_block_gas, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + + // let actual_gas_limit_overhead = ceil_div_u256( + // gas_spent_on_computation * overhead_for_block_gas, + // MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT.into(), + // ); + + // let code_decommitter_sorter_circuit_overhead = ceil_div_u256( + // number_of_decommitment_requests * overhead_for_block_gas, + // GEOMETRY_CONFIG.limit_for_code_decommitter_sorter.into(), + // ); + + // let l1_l2_logs_overhead = ceil_div_u256( + // l2_l1_logs * overhead_for_block_gas, + // std::cmp::min( + // GEOMETRY_CONFIG.limit_for_l1_messages_merklizer, + // GEOMETRY_CONFIG.limit_for_l1_messages_pudata_hasher, + // ) + // .into(), + // ); + + // let overhead = vec![ + // tx_slot_overhead, + // overhead_for_length, + // actual_overhead_for_pubdata, + // 
actual_gas_limit_overhead, + // code_decommitter_sorter_circuit_overhead, + // l1_l2_logs_overhead, + // ] + // .into_iter() + // .max() + // .unwrap(); + + // overhead.as_u32() + } + + pub(crate) fn get_tx_gas_limit(&self, tx_index: usize) -> u32 { + let tx_description_offset = self.bootloader_state.get_tx_description_offset(tx_index); + self.state + .memory + .read_slot( + BOOTLOADER_HEAP_PAGE as usize, + tx_description_offset + TX_GAS_LIMIT_OFFSET, + ) + .value + .as_u32() + } +} diff --git a/core/lib/vm/src/storage.rs b/core/lib/vm/src/storage.rs index 3156f58bbbca..2aeba1140488 100644 --- a/core/lib/vm/src/storage.rs +++ b/core/lib/vm/src/storage.rs @@ -4,21 +4,20 @@ use std::fmt::Debug; use std::rc::Rc; use zksync_state::storage_view::StorageView; -use zksync_types::{Address, StorageKey, StorageValue, ZkSyncReadStorage, H256}; +use zksync_types::{get_known_code_key, StorageKey, StorageValue, ZkSyncReadStorage, H256}; pub trait Storage: Debug + Sync + Send { fn get_value(&mut self, key: &StorageKey) -> StorageValue; // Returns the original value. 
fn set_value(&mut self, key: &StorageKey, value: StorageValue) -> StorageValue; fn is_write_initial(&mut self, key: &StorageKey) -> bool; - fn load_contract(&mut self, address: Address) -> Option>; - fn save_contract(&mut self, address: Address, bytecode: Vec); fn load_factory_dep(&mut self, hash: H256) -> Option>; - fn save_factory_dep(&mut self, hash: H256, bytecode: Vec); fn number_of_updated_storage_slots(&self) -> usize; fn get_modified_storage_keys(&self) -> &HashMap; + + fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool; } impl Storage for StorageView { @@ -35,22 +34,10 @@ impl Storage for StorageView { self.is_write_initial(key) } - fn load_contract(&mut self, address: Address) -> Option> { - self.load_contract(address) - } - - fn save_contract(&mut self, address: Address, bytecode: Vec) { - self.save_contract(address, bytecode); - } - fn load_factory_dep(&mut self, hash: H256) -> Option> { self.load_factory_dep(hash) } - fn save_factory_dep(&mut self, hash: H256, bytecode: Vec) { - self.save_factory_dep(hash, bytecode); - } - fn number_of_updated_storage_slots(&self) -> usize { self.get_modified_storage_keys().len() } @@ -58,6 +45,13 @@ impl Storage for StorageView { fn get_modified_storage_keys(&self) -> &HashMap { self.get_modified_storage_keys() } + + /// Returns whether a bytecode hash is "known", i.e. 
whether + /// it has been published on L1 + fn is_bytecode_known(&mut self, bytecode_hash: &H256) -> bool { + let code_key = get_known_code_key(bytecode_hash); + self.get_value(&code_key) != H256::zero() + } } pub type StoragePtr<'a> = Rc>; diff --git a/core/lib/vm/src/tests/bootloader.rs b/core/lib/vm/src/tests/bootloader.rs index 40584f23ebf1..7484bf40ed05 100644 --- a/core/lib/vm/src/tests/bootloader.rs +++ b/core/lib/vm/src/tests/bootloader.rs @@ -7,7 +7,7 @@ use crate::errors::{VmRevertReason, VmRevertReasonParsingResult}; use crate::memory::SimpleMemory; use crate::oracles::tracer::{ - read_pointer, ExecutionEndTracer, NoopMemoryTracer, PendingRefundTracer, VmHook, + ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, TransactionResultTracer, }; use crate::storage::{Storage, StoragePtr}; use crate::test_utils::{ @@ -15,8 +15,8 @@ use crate::test_utils::{ mock_loadnext_test_call, VmInstanceInnerState, }; use crate::utils::{ - create_test_block_params, default_block_properties, insert_system_contracts, - read_bootloader_test_code, BLOCK_GAS_LIMIT, + create_test_block_params, insert_system_contracts, read_bootloader_test_code, + BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, }; use crate::vm::{ get_vm_hook_params, tx_has_failed, VmBlockResult, VmExecutionStopReason, ZkSyncVmState, @@ -26,7 +26,7 @@ use crate::vm_with_bootloader::{ bytecode_to_factory_dep, get_bootloader_memory, get_bootloader_memory_for_encoded_tx, init_vm_inner, push_raw_transaction_to_bootloader_memory, push_transaction_to_bootloader_memory, BlockContext, DerivedBlockContext, BOOTLOADER_HEAP_PAGE, - BOOTLOADER_TX_DESCRIPTION_OFFSET, TX_DESCRIPTION_OFFSET, + BOOTLOADER_TX_DESCRIPTION_OFFSET, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, }; use crate::vm_with_bootloader::{BlockContextMode, BootloaderJobType, TxExecutionMode}; use crate::{test_utils, VmInstance}; @@ -43,6 +43,7 @@ use zk_evm::abstractions::{ }; use zk_evm::aux_structures::Timestamp; use zk_evm::block_properties::BlockProperties; 
+use zk_evm::opcodes::execution::ret; use zk_evm::sha3::digest::typenum::U830; use zk_evm::witness_trace::VmWitnessTracer; use zk_evm::zkevm_opcode_defs::decoding::VmEncodingMode; @@ -50,7 +51,9 @@ use zk_evm::zkevm_opcode_defs::FatPointer; use zksync_types::block::DeployedContract; use zksync_types::ethabi::encode; use zksync_types::l1::L1Tx; +use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; use zksync_types::tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}; +use zksync_utils::bytecode::CompressedBytecodeInfo; use zksync_utils::test_utils::LoadnextContractExecutionParams; use zksync_utils::{ address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, bytes_to_le_words, h256_to_u256, @@ -63,7 +66,7 @@ use std::time; use zksync_contracts::{ default_erc20_bytecode, get_loadnext_contract, known_codes_contract, load_contract, load_sys_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, - DEFAULT_ACCOUNT_CODE, PLAYGROUND_BLOCK_BOOTLOADER_CODE, PROVED_BLOCK_BOOTLOADER_CODE, + BaseSystemContracts, SystemContractCode, PLAYGROUND_BLOCK_BOOTLOADER_CODE, }; use zksync_crypto::rand::random; use zksync_state::secondary_storage::SecondaryStateStorage; @@ -79,16 +82,18 @@ use zksync_types::{ L2ChainId, PackedEthSignature, StorageKey, StorageLogQueryType, Transaction, H256, KNOWN_CODES_STORAGE_ADDRESS, U256, }; -use zksync_types::{fee::Fee, l2::L2Tx, l2_to_l1_log::L2ToL1Log, tx::ExecutionMetrics}; +use zksync_types::{fee::Fee, l2::L2Tx, l2_to_l1_log::L2ToL1Log}; use zksync_types::{ get_code_key, get_is_account_key, get_known_code_key, get_nonce_key, L1TxCommonData, Nonce, PriorityOpId, SerialId, StorageLog, ZkSyncReadStorage, BOOTLOADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, FAIR_L2_GAS_PRICE, H160, L2_ETH_TOKEN_ADDRESS, - MAX_GAS_PER_PUBDATA_BYTE, MAX_TXS_IN_BLOCK, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_GAS_PRICE_POSITION, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, - SYSTEM_CONTEXT_TX_ORIGIN_POSITION, + CONTRACT_DEPLOYER_ADDRESS, H160, 
L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, + MAX_TXS_IN_BLOCK, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, + SYSTEM_CONTEXT_MINIMAL_BASE_FEE, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, }; +use once_cell::sync::Lazy; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; + fn run_vm_with_custom_factory_deps<'a>( oracle_tools: &'a mut OracleTools<'a, false>, block_context: BlockContext, @@ -97,12 +102,14 @@ fn run_vm_with_custom_factory_deps<'a>( predefined_overhead: u32, expected_error: Option, ) { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); let mut vm = init_vm_inner( oracle_tools, BlockContextMode::OverrideCurrent(block_context.into()), block_properties, BLOCK_GAS_LIMIT, - PLAYGROUND_BLOCK_BOOTLOADER_CODE.code.clone(), + &base_system_contracts, TxExecutionMode::VerifyExecute, ); @@ -116,11 +123,14 @@ fn run_vm_with_custom_factory_deps<'a>( 0, 0, predefined_overhead, + u32::MAX, + 0, + vec![], ), Timestamp(0), ); - let result = vm.execute_next_tx().err(); + let result = vm.execute_next_tx(u32::MAX).err(); assert_eq!(expected_error, result); } @@ -141,13 +151,21 @@ fn test_dummy_bootloader() { let mut oracle_tools = OracleTools::new(storage_ptr); let (block_context, block_properties) = create_test_block_params(); + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + let bootloader_code = read_bootloader_test_code("dummy"); + let bootloader_hash = hash_bytecode(&bootloader_code); + + base_system_contracts.bootloader = SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + }; let mut vm = init_vm_inner( &mut oracle_tools, BlockContextMode::NewBlock(block_context.into(), Default::default()), &block_properties, BLOCK_GAS_LIMIT, - read_bootloader_test_code("dummy"), + &base_system_contracts, TxExecutionMode::VerifyExecute, ); @@ -178,13 +196,23 @@ fn test_bootloader_out_of_gas() { let mut oracle_tools = 
OracleTools::new(storage_ptr); let (block_context, block_properties) = create_test_block_params(); - // init vm with only 100 gas + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + + let bootloader_code = read_bootloader_test_code("dummy"); + let bootloader_hash = hash_bytecode(&bootloader_code); + + base_system_contracts.bootloader = SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + }; + + // init vm with only 10 ergs let mut vm = init_vm_inner( &mut oracle_tools, BlockContextMode::NewBlock(block_context.into(), Default::default()), &block_properties, 10, - read_bootloader_test_code("dummy"), + &base_system_contracts, TxExecutionMode::VerifyExecute, ); @@ -240,7 +268,7 @@ fn test_default_aa_interaction() { vec![], &[], Fee { - gas_limit: U256::from(10000000u32), + gas_limit: U256::from(20000000u32), max_fee_per_gas: U256::from(base_fee), max_priority_fee_per_gas: U256::from(0), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -263,13 +291,13 @@ fn test_default_aa_interaction() { BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let tx_execution_result = vm - .execute_next_tx() + .execute_next_tx(u32::MAX) .expect("Bootloader failed while processing transaction"); assert_eq!( @@ -323,7 +351,11 @@ fn test_default_aa_interaction() { ); } -fn execute_vm_with_predetermined_refund(txs: Vec, refunds: Vec) -> VmBlockResult { +fn execute_vm_with_predetermined_refund( + txs: Vec, + refunds: Vec, + compressed_bytecodes: Vec>, +) -> VmBlockResult { let (block_context, block_properties) = create_test_block_params(); let block_context: DerivedBlockContext = 
block_context.into(); @@ -347,7 +379,7 @@ fn execute_vm_with_predetermined_refund(txs: Vec, refunds: Vec BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); @@ -372,6 +404,7 @@ fn execute_vm_with_predetermined_refund(txs: Vec, refunds: Vec let memory_with_suggested_refund = get_bootloader_memory( txs.into_iter().map(Into::into).collect(), refunds, + compressed_bytecodes, TxExecutionMode::VerifyExecute, BlockContextMode::NewBlock(block_context, Default::default()), ); @@ -400,8 +433,10 @@ fn test_predetermined_refunded_gas() { let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); let base_fee = block_context.base_fee; + // We deploy here counter contract, because its logic is trivial let contract_code = read_test_contract(); + let published_bytecode = CompressedBytecodeInfo::from_original(contract_code.clone()).unwrap(); let tx: Transaction = get_deploy_tx( H256::random(), Nonce(0), @@ -409,7 +444,7 @@ fn test_predetermined_refunded_gas() { vec![], &[], Fee { - gas_limit: U256::from(10000000u32), + gas_limit: U256::from(20000000u32), max_fee_per_gas: U256::from(base_fee), max_priority_fee_per_gas: U256::from(0), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -430,14 +465,14 @@ fn test_predetermined_refunded_gas() { BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); let tx_execution_result = vm - .execute_next_tx() + .execute_next_tx(u32::MAX) .expect("Bootloader failed while processing transaction"); assert_eq!( @@ -468,6 +503,7 @@ fn 
test_predetermined_refunded_gas() { let mut result_with_predetermined_refund = execute_vm_with_predetermined_refund( vec![tx], vec![tx_execution_result.operator_suggested_refund], + vec![vec![published_bytecode]], ); // We need to sort these lists as those are flattened from HashMaps result.full_result.used_contract_hashes.sort(); @@ -573,7 +609,7 @@ fn execute_vm_with_possible_rollbacks( BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); @@ -584,9 +620,10 @@ fn execute_vm_with_possible_rollbacks( &mut vm, test_info.get_transaction(), TxExecutionMode::VerifyExecute, + None, ); - match vm.execute_next_tx() { + match vm.execute_next_tx(u32::MAX) { Err(reason) => { assert_eq!(test_info.rejection_reason(), Some(reason)); } @@ -657,7 +694,7 @@ fn test_vm_rollbacks() { vec![], &[], Fee { - gas_limit: U256::from(5000000u32), + gas_limit: U256::from(12000000u32), max_fee_per_gas: base_fee, max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -671,7 +708,7 @@ fn test_vm_rollbacks() { vec![], &[], Fee { - gas_limit: U256::from(5000000u32), + gas_limit: U256::from(12000000u32), max_fee_per_gas: base_fee, max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -685,7 +722,7 @@ fn test_vm_rollbacks() { vec![], &[], Fee { - gas_limit: U256::from(5000000u32), + gas_limit: U256::from(12000000u32), max_fee_per_gas: base_fee, max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -796,7 +833,7 @@ fn test_vm_rollbacks() { loadnext_contract.factory_deps, &loadnext_constructor_data, Fee { - gas_limit: U256::from(30000000u32), + gas_limit: U256::from(70000000u32), max_fee_per_gas: base_fee, max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), 
@@ -818,7 +855,7 @@ fn test_vm_rollbacks() { nonce, loadnext_contract_address, Fee { - gas_limit: U256::from(60000000u32), + gas_limit: U256::from(100000000u32), max_fee_per_gas: base_fee, max_priority_fee_per_gas: U256::zero(), gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), @@ -837,7 +874,7 @@ fn test_vm_rollbacks() { events: 100, hashes: 500, recursive_calls: 10, - deploys: 100, + deploys: 60, }, Nonce(1), ); @@ -848,7 +885,7 @@ fn test_vm_rollbacks() { events: 100, hashes: 500, recursive_calls: 10, - deploys: 100, + deploys: 60, }, Nonce(2), ); @@ -1009,20 +1046,26 @@ fn run_vm_with_raw_tx<'a>( block_properties: &'a BlockProperties, tx: TransactionData, ) -> (VmExecutionResult, bool) { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); let mut vm = init_vm_inner( oracle_tools, BlockContextMode::OverrideCurrent(block_context), block_properties, BLOCK_GAS_LIMIT, - PLAYGROUND_BLOCK_BOOTLOADER_CODE.code.clone(), + &base_system_contracts, TxExecutionMode::VerifyExecute, ); - let overhead = tx.overhead_gas(); + + let block_gas_price_per_pubdata = block_context.context.block_gas_price_per_pubdata(); + + let overhead = tx.overhead_gas(block_gas_price_per_pubdata as u32); push_raw_transaction_to_bootloader_memory( &mut vm, tx, TxExecutionMode::VerifyExecute, overhead, + None, ); let VmBlockResult { full_result: result, @@ -1228,12 +1271,17 @@ fn test_l1_tx_execution() { BlockContextMode::NewBlock(block_context.into(), Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + push_transaction_to_bootloader_memory( + &mut vm, + &l1_deploy_tx, TxExecutionMode::VerifyExecute, + None, ); - push_transaction_to_bootloader_memory(&mut vm, &l1_deploy_tx, TxExecutionMode::VerifyExecute); - let res = vm.execute_next_tx().unwrap(); + let res = 
vm.execute_next_tx(u32::MAX).unwrap(); // The code hash of the deployed contract should be marked as republished. let known_codes_key = get_known_code_key(&contract_code_hash); @@ -1253,19 +1301,38 @@ fn test_l1_tx_execution() { assert_eq!(res.result.logs.l2_to_l1_logs, required_l2_to_l1_logs); let tx = get_l1_execute_test_contract_tx(deployed_address, true); - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); - let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + let res = StorageWritesDeduplicator::apply_on_empty_state( + &vm.execute_next_tx(u32::MAX) + .unwrap() + .result + .logs + .storage_logs, + ); assert_eq!(res.initial_storage_writes, 0); let tx = get_l1_execute_test_contract_tx(deployed_address, false); - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); - let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + let res = StorageWritesDeduplicator::apply_on_empty_state( + &vm.execute_next_tx(u32::MAX) + .unwrap() + .result + .logs + .storage_logs, + ); assert_eq!(res.initial_storage_writes, 2); let repeated_writes = res.repeated_storage_writes; - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); - let res = ExecutionMetrics::new(&vm.execute_next_tx().unwrap().result.logs, 0, 0, 0, 0); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + let res = StorageWritesDeduplicator::apply_on_empty_state( + &vm.execute_next_tx(u32::MAX) + .unwrap() + .result + .logs + .storage_logs, + ); assert_eq!(res.initial_storage_writes, 1); // We do the same storage write, so it will be deduplicated assert_eq!(res.repeated_storage_writes, repeated_writes); @@ -1278,8 +1345,8 @@ 
fn test_l1_tx_execution() { } _ => unreachable!(), } - push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute); - let execution_result = vm.execute_next_tx().unwrap(); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + let execution_result = vm.execute_next_tx(u32::MAX).unwrap(); // The method is not payable, so the transaction with non-zero value should fail assert_eq!( execution_result.status, @@ -1287,7 +1354,8 @@ fn test_l1_tx_execution() { "The transaction should fail" ); - let res = ExecutionMetrics::new(&execution_result.result.logs, 0, 0, 0, 0); + let res = + StorageWritesDeduplicator::apply_on_empty_state(&execution_result.result.logs.storage_logs); // There are 2 initial writes here: // - totalSupply of ETH token @@ -1302,6 +1370,7 @@ fn test_invalid_bytecode() { let mut raw_storage = SecondaryStateStorage::new(db); insert_system_contracts(&mut raw_storage); let (block_context, block_properties) = create_test_block_params(); + let block_gas_per_pubdata = block_context.block_gas_price_per_pubdata(); let test_vm_with_custom_bytecode_hash = |bytecode_hash: H256, expected_revert_reason: Option| { @@ -1309,8 +1378,10 @@ fn test_invalid_bytecode() { let storage_ptr: &mut dyn Storage = &mut storage_accessor; let mut oracle_tools = OracleTools::new(storage_ptr); - let (encoded_tx, predefined_overhead) = - get_l1_tx_with_custom_bytecode_hash(h256_to_u256(bytecode_hash)); + let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( + h256_to_u256(bytecode_hash), + block_gas_per_pubdata as u32, + ); run_vm_with_custom_factory_deps( &mut oracle_tools, @@ -1375,78 +1446,6 @@ fn test_invalid_bytecode() { ); } -#[derive(Debug)] -enum TestExecutionResult { - Success(Vec), - Revert(Vec), -} - -#[derive(Debug, Default)] -struct TransactionExecutionErrorTracer { - result: Option, -} - -impl Tracer for TransactionExecutionErrorTracer { - type SupportedMemory = SimpleMemory; - const 
CALL_BEFORE_EXECUTION: bool = true; - - fn before_decoding(&mut self, _state: VmLocalStateData<'_>, _memory: &Self::SupportedMemory) {} - fn after_decoding( - &mut self, - _state: VmLocalStateData<'_>, - _data: AfterDecodingData, - _memory: &Self::SupportedMemory, - ) { - } - fn before_execution( - &mut self, - state: VmLocalStateData<'_>, - data: BeforeExecutionData, - memory: &Self::SupportedMemory, - ) { - let hook = VmHook::from_opcode_memory(&state, &data); - - if matches!(hook, VmHook::ExecutionResult) { - let vm_hook_params = get_vm_hook_params(memory); - - let success = vm_hook_params[0]; - let returndata_ptr = FatPointer::from_u256(vm_hook_params[1]); - let returndata = read_pointer(memory, returndata_ptr); - - assert!( - success == U256::zero() || success == U256::one(), - "The success should be either 0 or 1" - ); - assert!(self.result.is_none(), "The result is emitted twice"); - - let result = if success == U256::zero() { - TestExecutionResult::Revert(returndata) - } else { - TestExecutionResult::Success(returndata) - }; - - self.result = Some(result); - } - } - fn after_execution( - &mut self, - _state: VmLocalStateData<'_>, - _data: AfterExecutionData, - _memory: &Self::SupportedMemory, - ) { - } -} - -impl ExecutionEndTracer for TransactionExecutionErrorTracer { - fn should_stop_execution(&self) -> bool { - // This tracer will not prevent the execution from going forward - // until the end of the block. - false - } -} - -impl PendingRefundTracer for TransactionExecutionErrorTracer {} - #[test] fn test_tracing_of_execution_errors() { // In this test, we are checking that the execution errors are transmitted correctly from the bootloader. 
@@ -1472,7 +1471,7 @@ fn test_tracing_of_execution_errors() { gas_limit: U256::from(1000000u32), max_fee_per_gas: U256::from(10000000000u64), max_priority_fee_per_gas: U256::zero(), - gas_per_pubdata_limit: U256::from(50000u32), + gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), }, ); @@ -1490,20 +1489,25 @@ fn test_tracing_of_execution_errors() { BlockContextMode::NewBlock(block_context, Default::default()), &block_properties, BLOCK_GAS_LIMIT, - PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), + &BASE_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); - push_transaction_to_bootloader_memory(&mut vm, &tx.into(), TxExecutionMode::VerifyExecute); + push_transaction_to_bootloader_memory( + &mut vm, + &tx.into(), + TxExecutionMode::VerifyExecute, + None, + ); - let mut tracer = TransactionExecutionErrorTracer::default(); + let mut tracer = TransactionResultTracer::default(); assert_eq!( vm.execute_with_custom_tracer(&mut tracer), VmExecutionStopReason::VmFinished, "Tracer should never request stop" ); - match tracer.result { - Some(TestExecutionResult::Revert(revert_reason)) => { + match tracer.revert_reason { + Some(revert_reason) => { let revert_reason = VmRevertReason::try_from(&revert_reason as &[u8]).unwrap(); assert_eq!( revert_reason, @@ -1512,13 +1516,130 @@ fn test_tracing_of_execution_errors() { } ) } - _ => panic!("Tracer captured incorrect result {:#?}", tracer.result), + _ => panic!( + "Tracer captured incorrect result {:#?}", + tracer.revert_reason + ), } } -pub fn get_l1_tx_with_custom_bytecode_hash(bytecode_hash: U256) -> (Vec, u32) { +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
+#[test] +fn test_tx_gas_limit_offset() { + let gas_limit = U256::from(999999); + + let (block_context, block_properties) = create_test_block_params(); + let block_context: DerivedBlockContext = block_context.into(); + + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); + let raw_storage = SecondaryStateStorage::new(db); + let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + + let contract_code = read_test_contract(); + let tx: Transaction = get_deploy_tx( + H256::random(), + Nonce(0), + &contract_code, + Default::default(), + Default::default(), + Fee { + gas_limit, + ..Default::default() + }, + ) + .into(); + + let mut oracle_tools = OracleTools::new(storage_ptr); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context, Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + let gas_limit_from_memory = vm + .state + .memory + .read_slot( + BOOTLOADER_HEAP_PAGE as usize, + TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, + ) + .value; + assert_eq!(gas_limit_from_memory, gas_limit); +} + +#[test] +fn test_is_write_initial_behaviour() { + // In this test, we check result of `is_write_initial` at different stages. 
+ + let (block_context, block_properties) = create_test_block_params(); + let block_context: DerivedBlockContext = block_context.into(); + + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); + let mut raw_storage = SecondaryStateStorage::new(db); + insert_system_contracts(&mut raw_storage); + let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + + let base_fee = block_context.base_fee; + let account_pk = H256::random(); + let contract_code = read_test_contract(); + let tx: Transaction = get_deploy_tx( + account_pk, + Nonce(0), + &contract_code, + vec![], + &[], + Fee { + gas_limit: U256::from(20000000u32), + max_fee_per_gas: U256::from(base_fee), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(MAX_GAS_PER_PUBDATA_BYTE), + }, + ) + .into(); + + let sender_address = tx.initiator_account(); + let nonce_key = get_nonce_key(&sender_address); + + // Check that the next write to the nonce key will be initial. + assert!(storage_ptr.is_write_initial(&nonce_key)); + + // Set balance to be able to pay fee for txs. + let balance_key = storage_key_for_eth_balance(&sender_address); + storage_ptr.set_value(&balance_key, u256_to_h256(U256([0, 0, 1, 0]))); + + let mut oracle_tools = OracleTools::new(storage_ptr); + + let mut vm = init_vm_inner( + &mut oracle_tools, + BlockContextMode::NewBlock(block_context, Default::default()), + &block_properties, + BLOCK_GAS_LIMIT, + &BASE_SYSTEM_CONTRACTS, + TxExecutionMode::VerifyExecute, + ); + + push_transaction_to_bootloader_memory(&mut vm, &tx, TxExecutionMode::VerifyExecute, None); + + vm.execute_next_tx(u32::MAX) + .expect("Bootloader failed while processing the first transaction"); + // Check that `is_write_initial` still returns true for the nonce key. 
+ assert!(storage_ptr.is_write_initial(&nonce_key)); +} + +pub fn get_l1_tx_with_custom_bytecode_hash( + bytecode_hash: U256, + block_gas_per_pubdata: u32, +) -> (Vec, u32) { let tx: TransactionData = get_l1_execute_test_contract_tx(Default::default(), false).into(); - let predefined_overhead = tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash]); + let predefined_overhead = + tx.overhead_gas_with_custom_factory_deps(vec![bytecode_hash], block_gas_per_pubdata); let tx_bytes = tx.abi_encode_with_custom_factory_deps(vec![bytecode_hash]); (bytes_to_be_words(tx_bytes), predefined_overhead) diff --git a/core/lib/vm/src/transaction_data.rs b/core/lib/vm/src/transaction_data.rs index 683d67ec90a5..c87e4fd4fa94 100644 --- a/core/lib/vm/src/transaction_data.rs +++ b/core/lib/vm/src/transaction_data.rs @@ -1,8 +1,8 @@ -use zk_evm::zkevm_opcode_defs::system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}; +use zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_types::ethabi::{encode, Address, Token}; use zksync_types::fee::encoding_len; -use zksync_types::MAX_TXS_IN_BLOCK; use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; +use zksync_types::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; use zksync_utils::{address_to_h256, ceil_div_u256}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; @@ -10,7 +10,7 @@ use crate::vm_with_bootloader::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, }; -const L1_TX_TYPE: u8 = 255; +pub(crate) const L1_TX_TYPE: u8 = 255; // This structure represents the data that is used by // the Bootloader to describe the transaction. 
@@ -89,7 +89,7 @@ impl From for TransactionData { pubdata_price_limit: common_data.gas_per_pubdata_limit, // It doesn't matter what we put here, since // the bootloader does not charge anything - max_fee_per_gas: U256::zero(), + max_fee_per_gas: common_data.max_fee_per_gas, max_priority_fee_per_gas: U256::zero(), paymaster: Address::default(), nonce: U256::from(common_data.serial_id.0), // priority op ID @@ -159,13 +159,21 @@ impl TransactionData { bytes_to_be_words(bytes) } - pub fn overhead_gas(&self) -> u32 { - if self.tx_type != L1_TX_TYPE { - return 0; + pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { + // It is enforced by the protocol that the L1 transactions always pay the exact amount of gas per pubdata + // as was supplied in the transaction. + if self.tx_type == L1_TX_TYPE { + self.pubdata_price_limit.as_u32() + } else { + block_gas_price_per_pubdata } + } + pub fn overhead_gas(&self, block_gas_price_per_pubdata: u32) -> u32 { let total_gas_limit = self.gas_limit.as_u32(); - let gas_per_pubdata_byte_limit = self.pubdata_price_limit.as_u32(); + let gas_price_per_pubdata = + self.effective_gas_price_per_pubdata(block_gas_price_per_pubdata); + let encoded_len = encoding_len( self.data.len() as u64, self.signature.len() as u64, @@ -174,16 +182,28 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata_byte_limit, encoded_len) + let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + get_amortized_overhead( + total_gas_limit, + gas_price_per_pubdata, + encoded_len, + coeficients, + ) + } + + pub fn trusted_gas_limit(&self, _block_gas_price_per_pubdata: u32) -> u32 { + self.gas_limit.as_u32() } #[cfg(test)] pub(crate) fn overhead_gas_with_custom_factory_deps( &self, factory_deps_hashes: Vec, + block_gas_per_pubdata: u32, ) -> u32 { let total_gas_limit = self.gas_limit.as_u32(); - let gas_per_pubdata_byte_limit = 
self.pubdata_price_limit.as_u32(); + let gas_per_pubdata_byte_limit = + self.effective_gas_price_per_pubdata(block_gas_per_pubdata); let encoded_len = encoding_len( self.data.len() as u64, self.signature.len() as u64, @@ -191,7 +211,14 @@ impl TransactionData { self.paymaster_input.len() as u64, self.reserved_dynamic.len() as u64, ); - get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata_byte_limit, encoded_len) + + let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + get_amortized_overhead( + total_gas_limit, + gas_per_pubdata_byte_limit, + encoded_len, + coeficients, + ) } #[cfg(test)] @@ -208,16 +235,19 @@ impl TransactionData { } } -pub fn derive_overhead(gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize) -> u32 { - assert!( - gas_limit <= MAX_TX_ERGS_LIMIT, - "gas limit is larger than the maximal one" - ); +pub fn derive_overhead( + gas_limit: u32, + gas_price_per_pubdata: u32, + encoded_len: usize, + coeficients: OverheadCoeficients, +) -> u32 { + // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT + // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value + let gas_limit = std::cmp::min(MAX_TX_ERGS_LIMIT, gas_limit); // Using large U256 type to avoid overflow let max_block_overhead = U256::from(block_overhead_gas(gas_price_per_pubdata)); let gas_limit = U256::from(gas_limit); - let gas_price_per_pubdata = U256::from(gas_price_per_pubdata); let encoded_len = U256::from(encoded_len); // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits @@ -236,36 +266,94 @@ pub fn derive_overhead(gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: let tx_slot_overhead = ceil_div_u256(max_block_overhead, MAX_TXS_IN_BLOCK.into()); // We use "ceil" here for formal reasons to allow easier approach for calculating the overhead in O(1) - let max_pubdata_in_tx = ceil_div_u256(gas_limit, 
gas_price_per_pubdata); + // let max_pubdata_in_tx = ceil_div_u256(gas_limit, gas_price_per_pubdata); // The maximal potential overhead from pubdata - let pubdata_overhead = ceil_div_u256( - max_pubdata_in_tx * max_block_overhead, - MAX_PUBDATA_PER_BLOCK.into(), - ); - - let overhead = vec![ - overhead_for_single_instance_circuits, - overhead_for_length, - tx_slot_overhead, - pubdata_overhead, + // let pubdata_overhead = ceil_div_u256( + // max_pubdata_in_tx * max_block_overhead, + // MAX_PUBDATA_PER_BLOCK.into(), + // ); + + vec![ + (coeficients.ergs_limit_overhead_coeficient + * overhead_for_single_instance_circuits.as_u32() as f64) + .floor() as u32, + (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + .floor() as u32, + (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() - .unwrap(); + .unwrap() +} - overhead.as_u32() +/// Contains the coeficients with which the overhead for transactions will be calculated. +/// All of the coeficients should be <= 1. There are here to provide a certain "discount" for normal transactions +/// at the risk of malicious transactions that may close the block prematurely. 
+/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// result in an integer number +#[derive(Debug, Clone, Copy)] +pub struct OverheadCoeficients { + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, } -pub fn get_maximal_allowed_overhead( +impl OverheadCoeficients { + // This method ensures that the parameters keep the required invariants + fn new_checked( + slot_overhead_coeficient: f64, + bootloader_memory_overhead_coeficient: f64, + ergs_limit_overhead_coeficient: f64, + ) -> Self { + assert!( + (MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient).round() + == MAX_TX_ERGS_LIMIT as f64 / ergs_limit_overhead_coeficient, + "MAX_TX_ERGS_LIMIT / ergs_limit_overhead_coeficient must be an integer" + ); + + Self { + slot_overhead_coeficient, + bootloader_memory_overhead_coeficient, + ergs_limit_overhead_coeficient, + } + } + + // L1->L2 do not receive any discounts + fn new_l1() -> Self { + OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + } + + fn new_l2() -> Self { + OverheadCoeficients::new_checked( + 1.0, 1.0, + // For L2 transactions we allow a certain default discount with regard to the number of ergs. + // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations + // on gas per pubdata allow for roughly 800kk gas per L1 batch, so the rough trust "discount" on the proof's part + // to be paid by the users is 0.1. + 0.1, + ) + } + + pub fn from_tx_type(tx_type: u8) -> Self { + if tx_type == L1_TX_TYPE { + Self::new_l1() + } else { + Self::new_l2() + } + } +} + +/// This method returns the overhead for processing the block +pub fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, + coeficients: OverheadCoeficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); let total_gas_limit = U256::from(total_gas_limit); - let gas_per_pubdata_byte_limit = U256::from(gas_per_pubdata_byte_limit); let encoded_len = U256::from(encoded_len); // Derivation of overhead consists of 4 parts: @@ -295,14 +383,25 @@ pub fn get_maximal_allowed_overhead( // Now, we need to solve each of these separately: // 1. The overhead for occupying a single tx slot is a constant: - let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); + let tx_slot_overhead = { + let tx_slot_overhead = + ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); + (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len - let overhead_for_length = ceil_div_u256( - encoded_len * overhead_for_block_gas, - BOOTLOADER_TX_ENCODING_SPACE.into(), - ); + let overhead_for_length = { + let overhead_for_length = ceil_div_u256( + encoded_len * overhead_for_block_gas, + BOOTLOADER_TX_ENCODING_SPACE.into(), + ) + .as_u32(); + + (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + as u32 + }; + // since the pubdat is not published. If decided to use the pubdata overhead, it needs to be updated. // 3. ceil(O3 * overhead_for_block_gas) >= overhead_gas // O3 = max_pubdata_in_tx / MAX_PUBDATA_PER_BLOCK = ceil(gas_limit / gas_per_pubdata_byte_limit) / MAX_PUBDATA_PER_BLOCK // >= (gas_limit / (gas_per_pubdata_byte_limit * MAX_PUBDATA_PER_BLOCK). 
Throwing off the `ceil`, while may provide marginally lower @@ -316,49 +415,62 @@ pub fn get_maximal_allowed_overhead( // OB * TL + EP * MP > OE * EP * MP + OE * OB // (OB * TL + EP * MP) / (EP * MP + OB) > OE // OE = floor((OB * TL + EP * MP) / (EP * MP + OB)) with possible -1 if the division is without remainder - let overhead_for_pubdata = { - let numerator: U256 = overhead_for_block_gas * total_gas_limit - + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK); - let denominator = - gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas; - - // Corner case: if `total_gas_limit` = `gas_per_pubdata_byte_limit` = 0 - // then the numerator will be 0 and subtracting 1 will cause a panic, so we just return a zero. - if numerator.is_zero() { - 0.into() - } else { - (numerator - 1) / denominator - } - }; - - // 4. ceil(O4 * overhead_for_block_gas) >= overhead_gas + // let overhead_for_pubdata = { + // let numerator: U256 = overhead_for_block_gas * total_gas_limit + // + gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK); + // let denominator = + // gas_per_pubdata_byte_limit * U256::from(MAX_PUBDATA_PER_BLOCK) + overhead_for_block_gas; + + // // Corner case: if `total_gas_limit` = `gas_per_pubdata_byte_limit` = 0 + // // then the numerator will be 0 and subtracting 1 will cause a panic, so we just return a zero. + // if numerator.is_zero() { + // 0.into() + // } else { + // (numerator - 1) / denominator + // } + // }; + + // 4. K * ceil(O4 * overhead_for_block_gas) >= overhead_gas, where K is the discount // O4 = gas_limit / MAX_TX_ERGS_LIMIT. 
Using the notation from the previous equation: - // ceil(OB * GL / MAX_TX_ERGS_LIMIT) >= OE - // ceil(OB * (TL - OE) / MAX_TX_ERGS_LIMIT) >= OE - // OB * (TL - OE) / MAX_TX_ERGS_LIMIT > OE - 1 - // OB * (TL - OE) > OE * MAX_TX_ERGS_LIMIT - MAX_TX_ERGS_LIMIT - // OB * TL + MAX_TX_ERGS_LIMIT > OE * ( MAX_TX_ERGS_LIMIT + OB) - // OE = floor(OB * TL + MAX_TX_ERGS_LIMIT / (MAX_TX_ERGS_LIMIT + OB)), with possible -1 if the division is without remainder + // ceil(OB * GL / MAX_TX_ERGS_LIMIT) >= (OE / K) + // ceil(OB * (TL - OE) / MAX_TX_ERGS_LIMIT) >= (OE/K) + // OB * (TL - OE) / MAX_TX_ERGS_LIMIT > (OE/K) - 1 + // OB * (TL - OE) > (OE/K) * MAX_TX_ERGS_LIMIT - MAX_TX_ERGS_LIMIT + // OB * TL + MAX_TX_ERGS_LIMIT > OE * ( MAX_TX_ERGS_LIMIT/K + OB) + // OE = floor(OB * TL + MAX_TX_ERGS_LIMIT / (MAX_TX_ERGS_LIMIT/K + OB)), with possible -1 if the division is without remainder let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); - let denominator: U256 = U256::from(MAX_TX_ERGS_LIMIT) + overhead_for_block_gas; + let denominator: U256 = U256::from( + (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + ) + overhead_for_block_gas; - (numerator - 1) / denominator - }; + let overhead_for_gas = (numerator - 1) / denominator; - let max_overhead = vec![ - tx_slot_overhead, - overhead_for_length, - overhead_for_pubdata, - overhead_for_gas, - ] - .into_iter() - .max() - // For the sake of consistency making sure that total_gas_limit >= max_overhead - .map(|max_overhead| std::cmp::min(max_overhead, total_gas_limit)) - .unwrap(); + overhead_for_gas.as_u32() + }; - max_overhead.as_u32() + let overhead = vec![tx_slot_overhead, overhead_for_length, overhead_for_gas] + .into_iter() + .max() + // For the sake of consistency making sure that total_gas_limit >= max_overhead + .map(|max_overhead| std::cmp::min(max_overhead, total_gas_limit.as_u32())) + .unwrap(); + + let limit_after_deducting_overhead = 
total_gas_limit - overhead; + + // During double checking of the overhead, the bootloader will assume that the + // body of the transaction does not have any more than MAX_L2_TX_GAS_LIMIT ergs available to it. + if limit_after_deducting_overhead.as_u64() > MAX_L2_TX_GAS_LIMIT { + // We derive the same overhead that would exist for the MAX_L2_TX_GAS_LIMIT ergs + derive_overhead( + MAX_L2_TX_GAS_LIMIT as u32, + gas_per_pubdata_byte_limit, + encoded_len.as_usize(), + coeficients, + ) + } else { + overhead + } } pub(crate) fn block_overhead_gas(gas_per_pubdata_byte: u32) -> u32 { @@ -377,6 +489,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, + coeficients: OverheadCoeficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -394,6 +507,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, + coeficients, ); derived_overhead >= suggested_overhead @@ -419,33 +533,44 @@ mod tests { #[test] fn test_correctness_for_efficient_overhead() { - let test_params = |total_gas_limit: u32, gas_per_pubdata: u32, encoded_len: usize| { - assert!( - total_gas_limit / gas_per_pubdata <= MAX_PUBDATA_PER_BLOCK, - "The input data should not allow too much pubdata per block" - ); - + let test_params = |total_gas_limit: u32, + gas_per_pubdata: u32, + encoded_len: usize, + coeficients: OverheadCoeficients| { let result_by_efficient_search = - get_maximal_allowed_overhead(total_gas_limit, gas_per_pubdata, encoded_len); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, + coeficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900); + test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); // Very small 
parameters - test_params(0, 1, 12); + test_params(0, 1, 12, OverheadCoeficients::new_l2()); // Relatively big parameters - let max_tx_overhead = derive_overhead(MAX_TX_ERGS_LIMIT, 5000, 10000); - test_params(MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000); + let max_tx_overhead = derive_overhead( + MAX_TX_ERGS_LIMIT, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + test_params( + MAX_TX_ERGS_LIMIT + max_tx_overhead, + 5000, + 10000, + OverheadCoeficients::new_l2(), + ); + + test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); } #[test] diff --git a/core/lib/vm/src/utils.rs b/core/lib/vm/src/utils.rs index 2a9c73c497e0..8d75aff725e0 100644 --- a/core/lib/vm/src/utils.rs +++ b/core/lib/vm/src/utils.rs @@ -1,19 +1,20 @@ use crate::{memory::SimpleMemory, vm_with_bootloader::BlockContext}; +use once_cell::sync::Lazy; +use zk_evm::block_properties::BlockProperties; use zk_evm::{ aux_structures::{LogQuery, MemoryPage, Timestamp}, - block_properties::BlockProperties, vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, DEFAULT_ACCOUNT_CODE}; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; use zksync_state::secondary_storage::SecondaryStateStorage; use zksync_types::{ get_code_key, get_system_context_init_logs, system_contracts::get_system_smart_contracts, - Address, L1BatchNumber, StorageLog, StorageLogQuery, FAIR_L2_GAS_PRICE, H160, H256, - MAX_L2_TX_GAS_LIMIT, U256, + Address, L1BatchNumber, StorageLog, StorageLogQuery, H160, H256, MAX_L2_TX_GAS_LIMIT, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; pub const INITIAL_TIMESTAMP: u32 = 1024; pub const INITIAL_MEMORY_COUNTER: u32 = 2048; @@ -230,25 +231,25 @@ pub fn precompile_calls_count_after_timestamp( sorted_timestamps.len() - sorted_timestamps.partition_point(|t| *t < from_timestamp) } -pub 
fn default_block_properties() -> BlockProperties { - BlockProperties { - default_aa_code_hash: DEFAULT_ACCOUNT_CODE.hash, - zkporter_is_available: false, - } -} +pub static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); pub fn create_test_block_params() -> (BlockContext, BlockProperties) { let context = BlockContext { block_number: 1u32, block_timestamp: 1000, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: FAIR_L2_GAS_PRICE, + l1_gas_price: 50_000_000_000, // 50 gwei + fair_l2_gas_price: 250_000_000, // 0.25 gwei operator_address: H160::zero(), }; - let block_properties = default_block_properties(); - - (context, block_properties) + ( + context, + BlockProperties { + default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }, + ) } pub fn insert_system_contracts(raw_storage: &mut SecondaryStateStorage) { @@ -272,10 +273,9 @@ pub fn insert_system_contracts(raw_storage: &mut SecondaryStateStorage) { raw_storage.save(L1BatchNumber(0)) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - let bytecode = read_zbin_bytecode(format!( +pub fn read_bootloader_test_code(test: &str) -> Vec { + read_zbin_bytecode(format!( "etc/system-contracts/bootloader/tests/artifacts/{}.yul/{}.yul.zbin", test, test - )); - bytes_to_be_words(bytecode) + )) } diff --git a/core/lib/vm/src/vm.rs b/core/lib/vm/src/vm.rs index 0fd543472242..2abba2dd3215 100644 --- a/core/lib/vm/src/vm.rs +++ b/core/lib/vm/src/vm.rs @@ -1,3 +1,4 @@ +use std::convert::TryFrom; use std::fmt::Debug; use zk_evm::aux_structures::Timestamp; @@ -21,8 +22,8 @@ use crate::oracles::decommitter::DecommitterOracle; use crate::oracles::precompile::PrecompilesProcessorWithHistory; use crate::oracles::storage::StorageOracle; use crate::oracles::tracer::{ - BootloaderTracer, ExecutionEndTracer, NoopMemoryTracer, OneTxTracer, PendingRefundTracer, - ValidationError, ValidationTracer, ValidationTracerParams, + 
BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, + TransactionResultTracer, ValidationError, ValidationTracer, ValidationTracerParams, }; use crate::oracles::OracleWithHistory; use crate::utils::{ @@ -87,7 +88,7 @@ pub struct VmExecutionResult { /// available to VM before and after execution. /// /// It means, that depending on the context, `gas_used` may represent different things. - /// If VM is continously invoked and interrupted after each tx, this field may represent the + /// If VM is continuously invoked and interrupted after each tx, this field may represent the /// amount of gas spent by a single transaction. /// /// To understand, which value does `gas_used` represent, see the documentation for the method @@ -370,21 +371,21 @@ impl<'a> VmInstance<'a> { let timestamp = Timestamp(local_state.timestamp); - vlog::trace!("Rollbacking decomitter"); + vlog::trace!("Rolling back decomitter"); self.state .decommittment_processor .rollback_to_timestamp(timestamp); - vlog::trace!("Rollbacking event_sink"); + vlog::trace!("Rolling back event_sink"); self.state.event_sink.rollback_to_timestamp(timestamp); - vlog::trace!("Rollbacking storage"); + vlog::trace!("Rolling back storage"); self.state.storage.rollback_to_timestamp(timestamp); - vlog::trace!("Rollbacking memory"); + vlog::trace!("Rolling back memory"); self.state.memory.rollback_to_timestamp(timestamp); - vlog::trace!("Rollbacking precompiles_processor"); + vlog::trace!("Rolling back precompiles_processor"); self.state .precompiles_processor .rollback_to_timestamp(timestamp); @@ -405,6 +406,12 @@ impl<'a> VmInstance<'a> { self.rollback_to_snapshot(snapshot); } + /// Removes the latest snapshot without rollbacking to it. + /// This function expects that there is at least one snapshot present. + pub fn pop_snapshot_no_rollback(&mut self) { + self.snapshots.pop().unwrap(); + } + /// Returns the amount of gas remaining to the VM. 
/// Note that this *does not* correspond to the gas limit of a transaction. /// To calculate the amount of gas spent by transaction, you should call this method before and after @@ -413,7 +420,7 @@ impl<'a> VmInstance<'a> { /// Note: this method should only be called when either transaction is fully completed or VM completed /// its execution. Remaining gas value is read from the current stack frame, so if you'll attempt to /// read it during the transaction execution, you may receive invalid value. - fn gas_remaining(&self) -> u32 { + pub(crate) fn gas_remaining(&self) -> u32 { self.state.local_state.callstack.current.ergs_remaining } @@ -487,13 +494,16 @@ impl<'a> VmInstance<'a> { } // Returns a tuple of `VmExecutionStopReason` and the size of the refund proposed by the operator - fn execute_with_custom_tracer_and_refunds( + fn execute_with_custom_tracer_and_refunds< + T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + >( &mut self, tracer: &mut T, ) -> (VmExecutionStopReason, u32) { let mut operator_refund = None; let timestamp_initial = Timestamp(self.state.local_state.timestamp); let gas_remaining_before = self.gas_remaining(); + let spent_pubdata_counter_before = self.state.local_state.spent_pubdata_counter; loop { // Sanity check: we should never reach the maximum value, because then we won't be able to process the next cycle. 
@@ -520,10 +530,28 @@ impl<'a> VmInstance<'a> { "Operator was asked for refund two times" ); - let refund_to_propose = bootloader_refund - + self.block_overhead_refund(timestamp_initial, gas_remaining_before); - let refund_slot = - OPERATOR_REFUNDS_OFFSET + self.bootloader_state.tx_to_execute() - 1; + let gas_spent_on_pubdata = tracer.gas_spent_on_pubdata(&self.state.local_state) + - spent_pubdata_counter_before; + let tx_body_refund = + self.tx_body_refund(timestamp_initial, bootloader_refund, gas_spent_on_pubdata); + + if tx_body_refund < bootloader_refund { + vlog::error!( + "Suggested tx body refund is less than bootloader refund. Tx body refund: {}, bootloader refund: {}", + tx_body_refund, + bootloader_refund + ); + } + + let refund_to_propose = tx_body_refund + + self.block_overhead_refund( + timestamp_initial, + gas_remaining_before, + gas_spent_on_pubdata, + ); + + let current_tx_index = self.bootloader_state.tx_to_execute() - 1; + let refund_slot = OPERATOR_REFUNDS_OFFSET + current_tx_index; // Writing the refund into memory self.state.memory.memory.write_to_memory( @@ -537,6 +565,31 @@ impl<'a> VmInstance<'a> { ); operator_refund = Some(refund_to_propose); tracer.set_refund_as_done(); + + let tx_gas_limit = self.get_tx_gas_limit(current_tx_index); + + if tx_gas_limit < bootloader_refund { + vlog::error!( + "Tx gas limit is less than bootloader refund. Tx gas limit: {}, bootloader refund: {}", + tx_gas_limit, + bootloader_refund + ); + } + if tx_gas_limit < refund_to_propose { + vlog::error!( + "Tx gas limit is less than operator refund. 
Tx gas limit: {}, operator refund: {}", + tx_gas_limit, + refund_to_propose + ); + } + + metrics::histogram!("vm.refund", bootloader_refund as f64 / tx_gas_limit as f64 * 100.0, "type" => "bootloader"); + metrics::histogram!("vm.refund", refund_to_propose as f64 / tx_gas_limit as f64 * 100.0, "type" => "operator"); + metrics::histogram!( + "vm.refund.diff", + (refund_to_propose as f64 - bootloader_refund as f64) / tx_gas_limit as f64 + * 100.0 + ); } if tracer.should_stop_execution() { @@ -548,124 +601,10 @@ impl<'a> VmInstance<'a> { } } - /// Calculates the refund for the block overhead. - /// This refund is the difference between how much user paid in advance for the block overhead - /// and how much he should pay based on actual tx execution result. - fn block_overhead_refund(&self, _from_timestamp: Timestamp, _gas_remaining_before: u32) -> u32 { - 0 - - // let pubdata_used = self.pubdata_used(from_timestamp); - - // let gas_used = gas_remaining_before - self.gas_remaining(); - // // Can be fixed in the scope of SMA-1654 because it also requires calculation of `pubdata_paid_for`. 
- // let computational_gas_used = - // gas_used - pubdata_used * self.state.local_state.current_ergs_per_pubdata_byte; - // let (_, l2_to_l1_logs) = self.collect_events_and_l1_logs_after_timestamp(from_timestamp); - // let current_tx_index = self.bootloader_state.tx_to_execute() - 1; - - // let actual_overhead = Self::actual_overhead_gas( - // self.state.local_state.current_ergs_per_pubdata_byte, - // self.bootloader_state.get_tx_size(current_tx_index), - // pubdata_used, - // computational_gas_used, - // self.state - // .decommittment_processor - // .get_number_of_decommitment_requests_after_timestamp(from_timestamp), - // l2_to_l1_logs.len(), - // ); - - // let predefined_overhead = self - // .state - // .memory - // .read_slot( - // BOOTLOADER_HEAP_PAGE as usize, - // TX_OVERHEAD_OFFSET + current_tx_index, - // ) - // .value - // .as_u32(); - - // if actual_overhead <= predefined_overhead { - // predefined_overhead - actual_overhead - // } else { - // // This should never happen but potential mistakes at the early stage should not bring the server down. 
- // vlog::error!( - // "Actual overhead is greater than predefined one, actual: {}, predefined: {}", - // actual_overhead, - // predefined_overhead - // ); - // 0 - // } - } - - #[allow(dead_code)] - fn actual_overhead_gas( - _gas_per_pubdata_byte_limit: u32, - _encoded_len: usize, - _pubdata_used: u32, - _computational_gas_used: u32, - _number_of_decommitment_requests: usize, - _l2_l1_logs: usize, - ) -> u32 { - 0 - - // let overhead_for_block_gas = U256::from(crate::transaction_data::block_overhead_gas( - // gas_per_pubdata_byte_limit, - // )); - - // let encoded_len = U256::from(encoded_len); - // let pubdata_used = U256::from(pubdata_used); - // let computational_gas_used = U256::from(computational_gas_used); - // let number_of_decommitment_requests = U256::from(number_of_decommitment_requests); - // let l2_l1_logs = U256::from(l2_l1_logs); - - // let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()); - - // let overhead_for_length = ceil_div_u256( - // encoded_len * overhead_for_block_gas, - // BOOTLOADER_TX_ENCODING_SPACE.into(), - // ); - - // let actual_overhead_for_pubdata = ceil_div_u256( - // pubdata_used * overhead_for_block_gas, - // MAX_PUBDATA_PER_BLOCK.into(), - // ); - - // let actual_gas_limit_overhead = ceil_div_u256( - // computational_gas_used * overhead_for_block_gas, - // MAX_BLOCK_MULTIINSTANCE_GAS_LIMIT.into(), - // ); - - // let code_decommitter_sorter_circuit_overhead = ceil_div_u256( - // number_of_decommitment_requests * overhead_for_block_gas, - // GEOMETRY_CONFIG.limit_for_code_decommitter_sorter.into(), - // ); - - // let l1_l2_logs_overhead = ceil_div_u256( - // l2_l1_logs * overhead_for_block_gas, - // std::cmp::min( - // GEOMETRY_CONFIG.limit_for_l1_messages_merklizer, - // GEOMETRY_CONFIG.limit_for_l1_messages_pudata_hasher, - // ) - // .into(), - // ); - - // let overhead = vec![ - // tx_slot_overhead, - // overhead_for_length, - // actual_overhead_for_pubdata, - // actual_gas_limit_overhead, - 
// code_decommitter_sorter_circuit_overhead, - // l1_l2_logs_overhead, - // ] - // .into_iter() - // .max() - // .unwrap(); - - // overhead.as_u32() - } - // Executes VM until the end or tracer says to stop. - pub(crate) fn execute_with_custom_tracer( + pub(crate) fn execute_with_custom_tracer< + T: ExecutionEndTracer + PendingRefundTracer + PubdataSpentTracer, + >( &mut self, tracer: &mut T, ) -> VmExecutionStopReason { @@ -676,9 +615,12 @@ impl<'a> VmInstance<'a> { // Ok(status: TxExecutionStatus::Success) when the transaction succeeded // Ok(status: TxExecutionStatus::Failure) when the transaction failed. // Note that failed transactions are considered properly processed and are included in blocks - pub fn execute_next_tx(&mut self) -> Result { + pub fn execute_next_tx( + &mut self, + validation_computational_gas_limit: u32, + ) -> Result { let tx_index = self.bootloader_state.next_unexecuted_tx() as u32; - let mut tx_tracer = OneTxTracer::default(); + let mut tx_tracer = OneTxTracer::new(validation_computational_gas_limit); let timestamp_initial = Timestamp(self.state.local_state.timestamp); let cycles_initial = self.state.local_state.monotonic_cycle_counter; @@ -729,8 +671,16 @@ impl<'a> VmInstance<'a> { - cycles_initial, }, }) + } else if tx_tracer.validation_run_out_of_gas() { + Err(TxRevertReason::ValidationFailed(VmRevertReason::General { + msg: format!( + "Took too many computational gas, allowed limit: {}", + validation_computational_gas_limit + ), + })) } else { - // VM ended up in state `stop_reason == VmExecutionStopReason::TracerRequestedStop && !tx_tracer.tx_has_been_processed()`. + // VM ended up in state + // `stop_reason == VmExecutionStopReason::TracerRequestedStop && !tx_tracer.tx_has_been_processed() && !tx_tracer.validation_run_out_of_gas()`. // It means that bootloader successfully finished its execution without executing the transaction. // It is an unexpected situation. 
panic!("VM successfully finished executing bootloader but transaction wasn't executed"); @@ -745,21 +695,39 @@ impl<'a> VmInstance<'a> { let cycles_initial = self.state.local_state.monotonic_cycle_counter; let gas_before = self.gas_remaining(); - let stop_reason = self.execute_with_custom_tracer(&mut NoopMemoryTracer); + let mut tx_result_tracer = TransactionResultTracer::default(); + let stop_reason = self.execute_with_custom_tracer(&mut tx_result_tracer); match stop_reason { VmExecutionStopReason::VmFinished => { let mut full_result = vm_may_have_ended(self, gas_before).unwrap(); - // if `job_type == BootloaderJobType::TransactionExecution` it means - // that the transaction has been executed as eth_call. if job_type == BootloaderJobType::TransactionExecution && tx_has_failed(&self.state, 0) && full_result.revert_reason.is_none() { - full_result.revert_reason = Some(VmRevertReasonParsingResult { - revert_reason: TxRevertReason::TxOutOfGas, - original_data: vec![], - }); + let revert_reason = tx_result_tracer + .revert_reason + .map(|reason| { + let vm_revert_reason = VmRevertReason::try_from(reason.as_slice()) + .unwrap_or_else(|_| VmRevertReason::Unknown { + function_selector: vec![], + data: reason.clone(), + }); + + VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(vm_revert_reason), + original_data: reason, + } + }) + .unwrap_or_else(|| VmRevertReasonParsingResult { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "Transaction reverted with empty reason. 
Possibly out of gas" + .to_string(), + }), + original_data: vec![], + }); + + full_result.revert_reason = Some(revert_reason); } let block_tip_result = VmPartialExecutionResult { diff --git a/core/lib/vm/src/vm_with_bootloader.rs b/core/lib/vm/src/vm_with_bootloader.rs index 3f591f97dff6..b30ae804a87a 100644 --- a/core/lib/vm/src/vm_with_bootloader.rs +++ b/core/lib/vm/src/vm_with_bootloader.rs @@ -11,21 +11,23 @@ use zk_evm::{ }, }; use zksync_config::constants::MAX_TXS_IN_BLOCK; -use zksync_contracts::{ - DEFAULT_ACCOUNT_CODE, ESTIMATE_FEE_BLOCK_CODE, PLAYGROUND_BLOCK_BOOTLOADER_CODE, - PROVED_BLOCK_BOOTLOADER_CODE, -}; +use zksync_contracts::BaseSystemContracts; use zksync_types::{ zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; -use zksync_utils::{address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, misc::ceil_div}; +use zksync_utils::{ + address_to_u256, + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + bytes_to_be_words, h256_to_u256, + misc::ceil_div, +}; use crate::{ bootloader_state::BootloaderState, oracles::OracleWithHistory, - transaction_data::TransactionData, + transaction_data::{TransactionData, L1_TX_TYPE}, utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, }, @@ -52,6 +54,12 @@ pub struct BlockContext { pub fair_l2_gas_price: u64, } +impl BlockContext { + pub fn block_gas_price_per_pubdata(&self) -> u64 { + derive_base_fee_and_gas_per_pubdata(self.l1_gas_price, self.fair_l2_gas_price).1 + } +} + /// Besides the raw values from the `BlockContext`, contains the values that are to be derived /// from the other values #[derive(Debug, Copy, Clone)] @@ -60,7 +68,7 @@ pub struct DerivedBlockContext { pub base_fee: u64, } -fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { +pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { // 
This value will typically be a lot less than u64 // unless the gas price on L1 goes beyond tens of millions of gwei l1_gas_price * (L1_GAS_PER_PUBDATA_BYTE as u64) @@ -128,7 +136,15 @@ pub const OPERATOR_REFUNDS_OFFSET: usize = DEBUG_SLOTS_OFFSET pub const TX_OVERHEAD_OFFSET: usize = OPERATOR_REFUNDS_OFFSET + OPERATOR_REFUNDS_SLOTS; pub const TX_OVERHEAD_SLOTS: usize = MAX_TXS_IN_BLOCK; -pub const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = TX_OVERHEAD_OFFSET + TX_OVERHEAD_SLOTS; +pub const TX_TRUSTED_GAS_LIMIT_OFFSET: usize = TX_OVERHEAD_OFFSET + TX_OVERHEAD_SLOTS; +pub const TX_TRUSTED_GAS_LIMIT_SLOTS: usize = MAX_TXS_IN_BLOCK; + +pub const COMPRESSED_BYTECODES_OFFSET: usize = + TX_TRUSTED_GAS_LIMIT_OFFSET + TX_TRUSTED_GAS_LIMIT_SLOTS; +pub const COMPRESSED_BYTECODES_SLOTS: usize = 32768; + +pub const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize = + COMPRESSED_BYTECODES_OFFSET + COMPRESSED_BYTECODES_SLOTS; // The size of the bootloader memory dedicated to the encodings of transactions pub const BOOTLOADER_TX_ENCODING_SPACE: u32 = @@ -143,6 +159,8 @@ pub const TX_DESCRIPTION_OFFSET: usize = BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * MAX_TXS_IN_BLOCK + MAX_POSTOP_SLOTS; +pub const TX_GAS_LIMIT_OFFSET: usize = 4; + pub(crate) const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; const BOOTLOADER_CODE_PAGE: u32 = code_page_candidate_from_base(MemoryPage(INITIAL_BASE_PAGE)).0; @@ -178,12 +196,14 @@ pub fn init_vm<'a>( block_context: BlockContextMode, block_properties: &'a BlockProperties, execution_mode: TxExecutionMode, + base_system_contract: &BaseSystemContracts, ) -> Box> { init_vm_with_gas_limit( oracle_tools, block_context, block_properties, execution_mode, + base_system_contract, BLOCK_GAS_LIMIT, ) } @@ -193,23 +213,15 @@ pub fn init_vm_with_gas_limit<'a>( block_context: BlockContextMode, block_properties: &'a BlockProperties, execution_mode: TxExecutionMode, + base_system_contract: &BaseSystemContracts, 
gas_limit: u32, ) -> Box> { - let bootloader_code = match (&block_context, execution_mode) { - (_, TxExecutionMode::EthCall) => PLAYGROUND_BLOCK_BOOTLOADER_CODE.code.clone(), - (BlockContextMode::OverrideCurrent(_), TxExecutionMode::VerifyExecute) => { - PLAYGROUND_BLOCK_BOOTLOADER_CODE.code.clone() - } - (_, TxExecutionMode::EstimateFee) => ESTIMATE_FEE_BLOCK_CODE.code.clone(), - _ => PROVED_BLOCK_BOOTLOADER_CODE.code.clone(), - }; - init_vm_inner( oracle_tools, block_context, block_properties, gas_limit, - bootloader_code, + base_system_contract, execution_mode, ) } @@ -296,18 +308,24 @@ pub fn init_vm_inner<'a>( block_context: BlockContextMode, block_properties: &'a BlockProperties, gas_limit: u32, - bootloader_bytecode: Vec, + base_system_contract: &BaseSystemContracts, execution_mode: TxExecutionMode, ) -> Box> { let start = Instant::now(); oracle_tools.decommittment_processor.populate( - vec![(DEFAULT_ACCOUNT_CODE.hash, DEFAULT_ACCOUNT_CODE.code.clone())], + vec![( + h256_to_u256(base_system_contract.default_aa.hash), + base_system_contract.default_aa.code.clone(), + )], Timestamp(0), ); oracle_tools.memory.populate( - vec![(BOOTLOADER_CODE_PAGE, bootloader_bytecode)], + vec![( + BOOTLOADER_CODE_PAGE, + base_system_contract.bootloader.code.clone(), + )], Timestamp(0), ); @@ -339,20 +357,39 @@ fn bootloader_initial_memory(block_properties: &BlockContextMode) -> Vec<(usize, pub fn get_bootloader_memory( txs: Vec, predefined_refunds: Vec, + predefined_compressed_bytecodes: Vec>, execution_mode: TxExecutionMode, block_context: BlockContextMode, ) -> Vec<(usize, U256)> { + let inner_context = block_context.inner_block_context().context; + + let block_gas_price_per_pubdata = inner_context.block_gas_price_per_pubdata(); + let mut memory = bootloader_initial_memory(&block_context); + let mut previous_compressed: usize = 0; let mut already_included_txs_size = 0; for (tx_index_in_block, tx) in txs.into_iter().enumerate() { + let compressed_bytecodes = 
predefined_compressed_bytecodes[tx_index_in_block].clone(); + + let mut total_compressed_len_words = 0; + for i in compressed_bytecodes.iter() { + total_compressed_len_words += i.encode_call().len() / 32; + } + let memory_for_current_tx = get_bootloader_memory_for_tx( tx.clone(), tx_index_in_block, execution_mode, already_included_txs_size, predefined_refunds[tx_index_in_block], + block_gas_price_per_pubdata as u32, + previous_compressed, + compressed_bytecodes, ); + + previous_compressed += total_compressed_len_words; + memory.extend(memory_for_current_tx); let encoded_struct = tx.into_tokens(); let encoding_length = encoded_struct.len(); @@ -365,10 +402,18 @@ pub fn push_transaction_to_bootloader_memory( vm: &mut VmInstance, tx: &Transaction, execution_mode: TxExecutionMode, + explicit_compressed_bytecodes: Option>, ) { let tx: TransactionData = tx.clone().into(); - let overhead = tx.overhead_gas(); - push_raw_transaction_to_bootloader_memory(vm, tx, execution_mode, overhead); + let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); + let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); + push_raw_transaction_to_bootloader_memory( + vm, + tx, + execution_mode, + overhead, + explicit_compressed_bytecodes, + ); } pub fn push_raw_transaction_to_bootloader_memory( @@ -376,6 +421,7 @@ pub fn push_raw_transaction_to_bootloader_memory( tx: TransactionData, execution_mode: TxExecutionMode, predefined_overhead: u32, + explicit_compressed_bytecodes: Option>, ) { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -386,13 +432,60 @@ pub fn push_raw_transaction_to_bootloader_memory( .iter() .map(|dep| bytecode_to_factory_dep(dep.clone())) .collect(); + + let compressed_bytecodes = explicit_compressed_bytecodes.unwrap_or_else(|| { + if tx.tx_type == L1_TX_TYPE { + // L1 transactions do not need compression + return vec![]; + } + + tx.factory_deps + 
.iter() + .filter_map(|bytecode| { + if vm + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(bytecode)) + { + return None; + } + + compress_bytecode(bytecode) + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: bytecode.clone(), + compressed, + }) + }) + .collect() + }); + let compressed_bytecodes_encoding_len_words = compressed_bytecodes + .iter() + .map(|bytecode| { + let encoding_length_bytes = bytecode.encode_call().len(); + assert!( + encoding_length_bytes % 32 == 0, + "ABI encoding of bytecode is not 32-byte aligned" + ); + + encoding_length_bytes / 32 + }) + .sum(); + vm.state .decommittment_processor .populate(codes_for_decommiter, timestamp); + let block_gas_price_per_pubdata = vm.block_context.context.block_gas_price_per_pubdata(); + let trusted_ergs_limit = tx.trusted_gas_limit(block_gas_price_per_pubdata as u32); let encoded_tx = tx.into_tokens(); let encoded_tx_size = encoded_tx.len(); + let previous_bytecodes = vm.bootloader_state.get_compressed_bytecodes(); + let bootloader_memory = get_bootloader_memory_for_encoded_tx( encoded_tx, tx_index_in_block, @@ -400,6 +493,9 @@ pub fn push_raw_transaction_to_bootloader_memory( already_included_txs_size, 0, predefined_overhead, + trusted_ergs_limit, + previous_bytecodes, + compressed_bytecodes, ); vm.state.memory.populate_page( @@ -408,16 +504,23 @@ pub fn push_raw_transaction_to_bootloader_memory( Timestamp(vm.state.local_state.timestamp), ); vm.bootloader_state.add_tx_data(encoded_tx_size); + vm.bootloader_state + .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); } +#[allow(clippy::too_many_arguments)] fn get_bootloader_memory_for_tx( tx: TransactionData, tx_index_in_block: usize, execution_mode: TxExecutionMode, already_included_txs_size: usize, predefined_refund: u32, + block_gas_per_pubdata: u32, + previous_compressed_bytecode_size: usize, + compressed_bytecodes: Vec, ) -> Vec<(usize, U256)> { - let overhead_gas = 
tx.overhead_gas(); + let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); + let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); get_bootloader_memory_for_encoded_tx( tx.into_tokens(), tx_index_in_block, @@ -425,9 +528,13 @@ fn get_bootloader_memory_for_tx( already_included_txs_size, predefined_refund, overhead_gas, + trusted_gas_limit, + previous_compressed_bytecode_size, + compressed_bytecodes, ) } +#[allow(clippy::too_many_arguments)] pub(crate) fn get_bootloader_memory_for_encoded_tx( encoded_tx: Vec, tx_index_in_block: usize, @@ -435,6 +542,9 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( already_included_txs_size: usize, predefined_refund: u32, predefined_overhead: u32, + trusted_gas_limit: u32, + previous_compressed_bytecode_size: usize, + compressed_bytecodes: Vec, ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -458,10 +568,29 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( let overhead_offset = TX_OVERHEAD_OFFSET + tx_index_in_block; memory.push((overhead_offset, predefined_overhead.into())); + let trusted_gas_limit_offset = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index_in_block; + memory.push((trusted_gas_limit_offset, trusted_gas_limit.into())); + // Now we need to actually put the transaction description: let encoding_length = encoded_tx.len(); memory.extend((tx_description_offset..tx_description_offset + encoding_length).zip(encoded_tx)); + // Note, +1 is moving for poitner + let compressed_bytecodes_offset = + COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; + + let memory_addition: Vec<_> = compressed_bytecodes + .into_iter() + .flat_map(|x| x.encode_call()) + .collect(); + + let memory_addition = bytes_to_be_words(memory_addition); + + memory.extend( + (compressed_bytecodes_offset..compressed_bytecodes_offset + memory_addition.len()) + .zip(memory_addition), + ); + memory } diff --git a/core/lib/web3_decl/src/namespaces/zks.rs 
b/core/lib/web3_decl/src/namespaces/zks.rs index 3bdbd27a40e5..8fb2374f25f4 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -6,7 +6,7 @@ use zksync_types::api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails}; use zksync_types::transaction_request::CallRequest; use zksync_types::{ api::U64, - explorer_api::BlockDetails, + explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, Address, H256, U256, @@ -29,6 +29,9 @@ pub trait ZksNamespace { #[method(name = "estimateFee")] fn estimate_fee(&self, req: CallRequest) -> RpcResult; + #[method(name = "zks_estimateGasL1ToL2")] + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult; + #[method(name = "getMainContract")] fn get_main_contract(&self) -> RpcResult
; @@ -92,4 +95,13 @@ pub trait ZksNamespace { #[method(name = "getTransactionDetails")] fn get_transaction_details(&self, hash: H256) -> RpcResult>; + + #[method(name = "getRawBlockTransactions")] + fn get_raw_block_transactions( + &self, + block_number: MiniblockNumber, + ) -> RpcResult>; + + #[method(name = "getL1BatchDetails")] + fn get_l1_batch_details(&self, batch: L1BatchNumber) -> RpcResult>; } diff --git a/core/tests/loadnext/Cargo.toml b/core/tests/loadnext/Cargo.toml index d4f7e4d1c71e..cf15999277f9 100644 --- a/core/tests/loadnext/Cargo.toml +++ b/core/tests/loadnext/Cargo.toml @@ -11,7 +11,7 @@ categories = ["cryptography"] publish = false # We don't want to publish our tests. [dependencies] -zksync = { path = "../../../sdk/zksync-rs", version = "0.3", features = ["mint"] } +zksync = { path = "../../../sdk/zksync-rs", features = ["mint"] } zksync_types = { path = "../../lib/types", version = "1.0" } zksync_utils = { path = "../../lib/utils", version = "1.0" } zksync_eth_signer = { path = "../../lib/eth_signer", version = "1.0" } diff --git a/core/tests/loadnext/src/account/explorer_api_executor.rs b/core/tests/loadnext/src/account/explorer_api_executor.rs index 4cf6d03ce1f3..4e4852a21f26 100644 --- a/core/tests/loadnext/src/account/explorer_api_executor.rs +++ b/core/tests/loadnext/src/account/explorer_api_executor.rs @@ -216,6 +216,7 @@ impl AccountLifespan { from_block_number: from_block, from_tx_index: None, block_number: None, + l1_batch_number: None, address: None, account_address: None, contract_address: None, @@ -237,6 +238,7 @@ impl AccountLifespan { from_block_number: from_block, from_tx_index: None, block_number: None, + l1_batch_number: None, address: None, account_address: Some(self.wallet.wallet.address()), contract_address: None, diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index 4a6d8abd617f..a8ce01f03ebd 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ 
-1,6 +1,7 @@ use futures::{channel::mpsc, future::join_all}; use std::ops::Add; use tokio::task::JoinHandle; +use zksync_types::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; use zksync::ethereum::{PriorityOpHolder, DEFAULT_PRIORITY_FEE}; use zksync::utils::{ @@ -560,9 +561,21 @@ impl Executor { // Assuming that gas prices on testnets are somewhat stable, we will consider it a constant. let average_gas_price = ethereum.client().get_gas_price("executor").await?; - Ok((average_gas_price + U256::from(DEFAULT_PRIORITY_FEE)) - * MAX_L1_TRANSACTION_GAS_LIMIT - * MAX_L1_TRANSACTIONS) + let gas_price_with_priority = average_gas_price + U256::from(DEFAULT_PRIORITY_FEE); + + let average_l1_to_l2_gas_limit = 5_000_000u32; + let average_price_for_l1_to_l2_execute = ethereum + .base_cost( + average_l1_to_l2_gas_limit.into(), + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE as u32, + Some(gas_price_with_priority), + ) + .await?; + + Ok( + gas_price_with_priority * MAX_L1_TRANSACTION_GAS_LIMIT * MAX_L1_TRANSACTIONS + + average_price_for_l1_to_l2_execute * MAX_L1_TRANSACTIONS, + ) } /// Waits for all the test account futures to be completed. @@ -581,7 +594,7 @@ impl Executor { /// Returns the amount of funds to be distributed between accounts on l1. fn amount_for_l1_distribution(&self) -> u128 { - u128::MAX >> 32 + u128::MAX >> 29 } /// Ensures that Ethereum transaction was successfully executed. 
diff --git a/core/tests/loadnext/src/main.rs b/core/tests/loadnext/src/main.rs index 2a2ac778942b..c13b8f54f1a0 100644 --- a/core/tests/loadnext/src/main.rs +++ b/core/tests/loadnext/src/main.rs @@ -13,7 +13,7 @@ use loadnext::{ #[tokio::main] async fn main() -> anyhow::Result<()> { - vlog::init(); + let _sentry_guard = vlog::init(); let config = LoadtestConfig::from_env() .expect("Config parameters should be loaded from env or from default values"); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 99eef4effd5f..a4087b24eb32 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -76,6 +76,7 @@ impl ZkSyncAccount { _increment_nonce: bool, ) -> L2Tx { todo!("New withdrawal support is not yet implemented") + // let mut stored_nonce = self.nonce.lock().unwrap(); // let withdraw = GenericL2Tx::::new_signed( // token, diff --git a/core/tests/testkit/src/commands/gas_price/mod.rs b/core/tests/testkit/src/commands/gas_price/mod.rs index c976af6585f4..d32f4f4cfd6a 100644 --- a/core/tests/testkit/src/commands/gas_price/mod.rs +++ b/core/tests/testkit/src/commands/gas_price/mod.rs @@ -36,7 +36,7 @@ pub async fn test_gas_price() { let test_db_manager = TestDatabaseManager::new().await; let mut storage = test_db_manager.connect_to_postgres().await; { - ensure_genesis_state(&mut storage, config.clone()).await; + ensure_genesis_state(&mut storage, &config).await; } println!("deploying contracts"); diff --git a/core/tests/testkit/src/commands/revert_block.rs b/core/tests/testkit/src/commands/revert_block.rs index 0640dc72d333..32cd06496d41 100644 --- a/core/tests/testkit/src/commands/revert_block.rs +++ b/core/tests/testkit/src/commands/revert_block.rs @@ -16,7 +16,7 @@ pub async fn test_revert_blocks() { let test_db_manager = TestDatabaseManager::new().await; let db = test_db_manager.get_db(); - ensure_genesis_state(db.clone(), config.clone()); + ensure_genesis_state(db.clone(), &config); 
println!("deploying contracts"); let deploy_timer = Instant::now(); diff --git a/core/tests/testkit/src/commands/upgrade_contract.rs b/core/tests/testkit/src/commands/upgrade_contract.rs index 80835e616f84..c40faf8aa29d 100644 --- a/core/tests/testkit/src/commands/upgrade_contract.rs +++ b/core/tests/testkit/src/commands/upgrade_contract.rs @@ -17,7 +17,7 @@ pub async fn test_upgrade_contract() { let test_db_manager = TestDatabaseManager::new().await; let db = test_db_manager.get_db(); - ensure_genesis_state(db.clone(), config.clone()); + ensure_genesis_state(db.clone(), &config); println!("deploying contracts"); let deploy_timer = Instant::now(); diff --git a/core/tests/testkit/src/eth_provider.rs b/core/tests/testkit/src/eth_provider.rs index 3de656be30ad..b0d2b4a527b1 100644 --- a/core/tests/testkit/src/eth_provider.rs +++ b/core/tests/testkit/src/eth_provider.rs @@ -82,6 +82,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let get_base_cost_func_name = match tx_id { // TransactionID::Deposit => "depositBaseCost", // TransactionID::AddToken => "addTokenBaseCost", @@ -111,7 +112,6 @@ impl EthereumProvider { // .ok_or_else(|| { // format_err!("overflow when adding layer 1 base cost and layer 2 tip fee") // }) - todo!("Testkit is not updated yet") } pub async fn erc20_balance( @@ -204,6 +204,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( // TransactionID::AddToken, @@ -242,8 +243,6 @@ impl EthereumProvider { // send_raw_tx_wait_confirmation(&self.main_contract_eth_client, signed_tx.raw_tx).await?; // Ok(EthExecResult::new(receipt, &self.main_contract_eth_client).await) - todo!("Testkit is not updated yet") - } pub async fn add_token( @@ -253,6 +252,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, 
_layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( // TransactionID::AddToken, @@ -284,8 +284,6 @@ impl EthereumProvider { // send_raw_tx_wait_confirmation(&self.main_contract_eth_client, signed_tx.raw_tx).await?; // Ok(EthExecResult::new(receipt, &self.main_contract_eth_client).await) - todo!("Testkit is not updated yet") - } pub async fn request_withdraw( @@ -297,6 +295,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( // TransactionID::Withdraw, @@ -334,7 +333,6 @@ impl EthereumProvider { // send_raw_tx_wait_confirmation(&self.main_contract_eth_client, signed_tx.raw_tx).await?; // Ok(EthExecResult::new(receipt, &self.main_contract_eth_client).await) - todo!("Testkit is not updated yet") } pub async fn deposit_eth( @@ -345,6 +343,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( // TransactionID::Deposit, @@ -381,7 +380,6 @@ impl EthereumProvider { // send_raw_tx_wait_confirmation(&self.main_contract_eth_client, signed_tx.raw_tx).await?; // Ok(EthExecResult::new(receipt, &self.main_contract_eth_client).await) - todo!("Testkit is not updated yet") } pub async fn send_eth(&self, to: Address, value: BigUint) -> anyhow::Result { @@ -448,6 +446,7 @@ impl EthereumProvider { _processing_type: OpProcessingType, _layer_2_tip_fee: BigUint, ) -> anyhow::Result { + todo!("Testkit is not updated yet") // let value = self // .get_layer_1_base_cost( // TransactionID::Deposit, @@ -484,7 +483,6 @@ impl EthereumProvider { // send_raw_tx_wait_confirmation(&self.main_contract_eth_client, signed_tx.raw_tx).await?; // Ok(EthExecResult::new(receipt, &self.main_contract_eth_client).await) - todo!("Testkit is not 
updated yet") } pub async fn commit_blocks( diff --git a/core/tests/testkit/src/main.rs b/core/tests/testkit/src/main.rs index 7864664fe26e..fdf1558b1afe 100644 --- a/core/tests/testkit/src/main.rs +++ b/core/tests/testkit/src/main.rs @@ -37,7 +37,7 @@ async fn main() -> anyhow::Result<()> { let opt = Opt::from_args(); if opt.debug { - vlog::init(); + let _sentry_guard = vlog::init(); } match opt.command { diff --git a/core/tests/testkit/src/tester.rs b/core/tests/testkit/src/tester.rs index 52e29ea45668..3e1f2ca090fc 100644 --- a/core/tests/testkit/src/tester.rs +++ b/core/tests/testkit/src/tester.rs @@ -42,6 +42,7 @@ pub struct Tester { account_balances: AccountBalances, server_handler: ServerHandler, state: State, + config: ZkSyncConfig, } impl Tester { @@ -54,7 +55,7 @@ impl Tester { let server_handler = ServerHandler::spawn_server( db_manager.get_db_path(), db_manager.get_state_keeper_db(), - config, + config.clone(), db_manager.create_pool(), db_manager.create_pool(), db_manager.create_pool(), @@ -69,6 +70,7 @@ impl Tester { operations_queue: Default::default(), server_handler, state, + config, } } @@ -464,10 +466,11 @@ impl Tester { if start.elapsed().as_secs() > 20 { panic!("Expect load new operation"); } - let all_blocks = self - .storage - .blocks_dal() - .get_ready_for_commit_blocks(VERY_BIG_BLOCK_NUMBER.0 as usize); + let all_blocks = self.storage.blocks_dal().get_ready_for_commit_blocks( + VERY_BIG_BLOCK_NUMBER.0 as usize, + self.config.chain.state_keeper.bootloader_hash, + self.config.chain.state_keeper.default_aa_hash, + ); let blocks: Vec<_> = all_blocks .into_iter() .filter(|block| block.header.number > self.state.last_committed_block) diff --git a/core/tests/testkit/src/utils.rs b/core/tests/testkit/src/utils.rs index 5339f259df7e..ad407218df3a 100644 --- a/core/tests/testkit/src/utils.rs +++ b/core/tests/testkit/src/utils.rs @@ -6,7 +6,7 @@ use std::io::Read; use std::path::PathBuf; use zksync_utils::parse_env; -use 
zksync_contracts::read_sys_contract_bytecode; +use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage}; use zksync_eth_client::ETHDirectClient; use zksync_eth_signer::PrivateKeySigner; use zksync_types::{l1::L1Tx, web3::types::TransactionReceipt, Address}; @@ -17,7 +17,7 @@ use crate::types::ETHEREUM_ADDRESS; pub fn load_test_bytecode_and_calldata() -> (Vec, Vec, Vec) { let mut dir_path = parse_env::("ZKSYNC_HOME"); dir_path.push("etc/contracts-test-data/e"); - let bytecode = read_sys_contract_bytecode("", "Emitter"); + let bytecode = read_sys_contract_bytecode("", "Emitter", ContractLanguage::Sol); let mut dir_path = parse_env::("ZKSYNC_HOME"); dir_path.push("etc"); diff --git a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol index 13884b71727b..409f3d16b372 100644 --- a/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol +++ b/core/tests/ts-integration/contracts/custom-account/RLPEncoder.sol @@ -75,6 +75,7 @@ library RLPEncoder { /// @notice Uses little endian ordering (The least significant byte has index `0`). /// NOTE: returns `0` for `0` function _highestByteSet(uint256 _number) private pure returns (uint256 hbs) { + // TODO: for optimization, the comparison can be replaced with bitwise operations // should be resolver after evaluating the cost of opcodes. 
if (_number >= 2**128) { _number >>= 128; diff --git a/core/tests/ts-integration/contracts/custom-account/custom-account.sol b/core/tests/ts-integration/contracts/custom-account/custom-account.sol index 864618ceca12..fc90355ac64e 100644 --- a/core/tests/ts-integration/contracts/custom-account/custom-account.sol +++ b/core/tests/ts-integration/contracts/custom-account/custom-account.sol @@ -15,6 +15,7 @@ contract CustomAccount is IAccount { using TransactionHelper for Transaction; bool public violateValidationRules; + uint256 public gasToSpent; bytes32 public lastTxHash; @@ -34,6 +35,9 @@ contract CustomAccount is IAccount { // out by the compiler emit BootloaderBalance(BOOTLOADER_FORMAL_ADDRESS.balance); } + + uint256 initialGas = gasleft(); + while(initialGas - gasleft() < gasToSpent) {} } function _validateTransaction(bytes32 _suggestedSignedTxHash, Transaction calldata _transaction) internal returns (bytes4 magic) { @@ -47,7 +51,7 @@ contract CustomAccount is IAccount { 0, abi.encodeCall(INonceHolder.incrementMinNonceIfEquals, (_transaction.nonce)) ); - + bytes memory correctSignature = abi.encodePacked(_suggestedSignedTxHash, address(this)); if (keccak256(_transaction.signature) == keccak256(correctSignature)) { @@ -100,10 +104,14 @@ contract CustomAccount is IAccount { _transaction.processPaymasterInput(); } + function setGasToSpent(uint256 _gasToSpent) public { + gasToSpent = _gasToSpent; + } + fallback() external payable { // fallback of default AA shouldn't be called by bootloader under no circumstances - assert(msg.sender != BOOTLOADER_FORMAL_ADDRESS); - + assert(msg.sender != BOOTLOADER_FORMAL_ADDRESS); + // If the contract is called directly, behave like an EOA } diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 59080306c84e..1828a5547c78 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -2,7 +2,7 @@ import 
'@matterlabs/hardhat-zksync-solc'; export default { zksolc: { - version: '1.3.1', + version: '1.3.7', compilerSource: 'binary', settings: { isSystem: true diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 7d36a9bf270c..eece51e49d99 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -4,7 +4,8 @@ "license": "MIT", "private": true, "scripts": { - "test": "zk f jest" + "test": "zk f jest", + "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts" }, "devDependencies": { "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index d52bf916964f..bf2d8c956f38 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -14,8 +14,10 @@ import { RetryProvider } from './retry-provider'; // // Please DO NOT change these constants if you don't know why you have to do that. Try to debug the particular issue // you face first. -export const L1_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.05'); -export const L2_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.50'); +export const L1_DEFAULT_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.05'); +// Stress tests for L1->L2 transactions on localhost require a lot of upfront payment, but these are skipped during tests on normal environments +export const L1_EXTENDED_TESTS_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.5'); +export const L2_ETH_PER_ACCOUNT = ethers.utils.parseEther('0.5'); export const ERC20_PER_ACCOUNT = ethers.utils.parseEther('10000.0'); /** @@ -74,6 +76,11 @@ export class TestContextOwner { this.mainSyncWallet = new zksync.Wallet(env.mainWalletPK, this.l2Provider, this.l1Provider); } + // Returns the required amount of L1 ETH + requiredL1ETHPerAccount() { + return this.env.network === 'localhost' ? 
L1_EXTENDED_TESTS_ETH_PER_ACCOUNT : L1_DEFAULT_ETH_PER_ACCOUNT; + } + /** * Performs the test context initialization. * @@ -173,7 +180,7 @@ export class TestContextOwner { ? requiredL2ETHAmount.sub(actualL2ETHAmount) : ethers.BigNumber.from(0); - const requiredL1ETHAmount = L1_ETH_PER_ACCOUNT.mul(accountsAmount).add(l2ETHAmountToDeposit); + const requiredL1ETHAmount = this.requiredL1ETHPerAccount().mul(accountsAmount).add(l2ETHAmountToDeposit); const actualL1ETHAmount = await this.mainSyncWallet.getBalanceL1(); this.reporter.message(`Operator balance on L1 is ${ethers.utils.formatEther(actualL1ETHAmount)} ETH`); @@ -289,7 +296,7 @@ export class TestContextOwner { zksync.utils.ETH_ADDRESS, this.mainEthersWallet, wallets, - L1_ETH_PER_ACCOUNT, + this.requiredL1ETHPerAccount(), nonce, gasPrice, this.reporter diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index 5bb2b3f6351e..1b6d1dff477b 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -68,14 +68,14 @@ export async function anyTransaction(wallet: zksync.Wallet): Promise { // Send a dummy transaction and wait until the new L1 batch is created. - const currentL1Batch = await wallet.provider.getL1BatchNumber(); - await anyTransaction(wallet); + const oldReceipt = await anyTransaction(wallet); // Invariant: even with 1 transaction, l1 batch must be eventually sealed, so this loop must exit. - while ((await wallet.provider.getL1BatchNumber()) <= currentL1Batch) { + while (!(await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash)).l1BatchNumber) { await zksync.utils.sleep(wallet.provider.pollingInterval); } + return await wallet.provider.getTransactionReceipt(oldReceipt.transactionHash); } /** * Waits until the requested block is finalized. 
diff --git a/core/tests/ts-integration/src/modifiers/balance-checker.ts b/core/tests/ts-integration/src/modifiers/balance-checker.ts index 59dd57ff9724..08a0445bd56f 100644 --- a/core/tests/ts-integration/src/modifiers/balance-checker.ts +++ b/core/tests/ts-integration/src/modifiers/balance-checker.ts @@ -15,10 +15,11 @@ import { IERC20MetadataFactory } from 'zksync-web3/build/typechain'; * modifier, since it also includes the fee check. * * @param wallet Wallet that is expected to pay for a transaction. + * @param isL1ToL2 Optional parameter that, if true, denotes that the checked transaction is an L1->L2 transaction. * @returns Matcher object */ -export async function shouldOnlyTakeFee(wallet: zksync.Wallet): Promise { - return await ShouldChangeBalance.create(zksync.utils.ETH_ADDRESS, [{ wallet, change: 0 }]); +export async function shouldOnlyTakeFee(wallet: zksync.Wallet, isL1ToL2?: boolean): Promise { + return await ShouldChangeBalance.create(zksync.utils.ETH_ADDRESS, [{ wallet, change: 0 }], { l1ToL2: isL1ToL2 }); } /** @@ -78,6 +79,7 @@ export interface BalanceChange { export interface Params { noAutoFeeCheck?: boolean; l1?: boolean; + l1ToL2?: boolean; } /** @@ -97,10 +99,12 @@ class ShouldChangeBalance extends MatcherModifier { balanceChanges: PopulatedBalanceChange[]; noAutoFeeCheck: boolean; l1: boolean; + l1ToL2: boolean; static async create(token: string, balanceChanges: BalanceChange[], params?: Params) { const l1 = params?.l1 ?? false; const noAutoFeeCheck = params?.noAutoFeeCheck ?? false; + const l1ToL2 = params?.l1ToL2 ?? 
false; if (token == zksync.utils.ETH_ADDRESS && l1 && !noAutoFeeCheck) { throw new Error('ETH balance checks on L1 are not supported'); @@ -119,15 +123,22 @@ class ShouldChangeBalance extends MatcherModifier { }); } - return new ShouldChangeBalance(token, populatedBalanceChanges, noAutoFeeCheck, l1); + return new ShouldChangeBalance(token, populatedBalanceChanges, noAutoFeeCheck, l1, l1ToL2); } - private constructor(token: string, balanceChanges: PopulatedBalanceChange[], noAutoFeeCheck: boolean, l1: boolean) { + private constructor( + token: string, + balanceChanges: PopulatedBalanceChange[], + noAutoFeeCheck: boolean, + l1: boolean, + l1ToL2: boolean + ) { super(); this.token = token; this.balanceChanges = balanceChanges; this.noAutoFeeCheck = noAutoFeeCheck; this.l1 = l1; + this.l1ToL2 = l1ToL2; } async check(receipt: zksync.types.TransactionReceipt): Promise { @@ -140,9 +151,15 @@ class ShouldChangeBalance extends MatcherModifier { // If fee should be checked, we're checking ETH token and this wallet is an initiator, // we should consider fees as well. - if (!this.noAutoFeeCheck && this.token == zksync.utils.ETH_ADDRESS && address == receipt.from) { + const autoFeeCheck = !this.noAutoFeeCheck && this.token == zksync.utils.ETH_ADDRESS; + if (autoFeeCheck) { // To "ignore" subtracted fee, we just add it back to the account balance. - newBalance = newBalance.add(extractFee(receipt).feeAfterRefund); + // For L1->L2 transactions the sender might be different from the refund recipient + if (this.l1ToL2) { + newBalance = newBalance.sub(extractRefundForL1ToL2(receipt, address)); + } else if (address == receipt.from) { + newBalance = newBalance.add(extractFee(receipt).feeAfterRefund); + } } const diff = newBalance.sub(prevBalance); @@ -217,6 +234,46 @@ export function extractFee(receipt: zksync.types.TransactionReceipt, from?: stri }; } +/** + * Helper method to extract the refund for the L1->L2 transaction in ETH wei. 
+ * + * @param receipt Receipt of the transaction to extract fee from. + * @param from Optional substitute to `receipt.from`. + * @returns Extracted fee + */ +function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refundRecipient?: string): ethers.BigNumber { + refundRecipient = refundRecipient ?? receipt.from; + + const mintTopic = ethers.utils.keccak256(ethers.utils.toUtf8Bytes('Mint(address,uint256)')); + + const refundLogs = receipt.logs.filter((log) => { + return log.topics.length == 2 && log.topics[0] == mintTopic; + }); + + if (refundLogs.length === 0) { + throw { + message: `No refund log was found in the following transaction receipt`, + receipt + }; + } + + // Note, that it is important that the refund log is the last log in the receipt, because + // there are multiple `Mint` events during a single L1->L2 transaction, so this one covers the + // final refund. + const refundLog = refundLogs[refundLogs.length - 1]; + + const formattedRefundRecipient = ethers.utils.hexlify(ethers.utils.zeroPad(refundRecipient, 32)); + + if (refundLog.topics[1].toLowerCase() !== formattedRefundRecipient.toLowerCase()) { + throw { + message: `The last ETH minted is not the refund recipient in the following transaction receipt`, + receipt + }; + } + + return ethers.BigNumber.from(refundLog.data); +} + /** * Returns the balance of requested token for a certain address. 
* diff --git a/core/tests/ts-integration/src/system.ts b/core/tests/ts-integration/src/system.ts index 18126f205956..aaed9620bc76 100644 --- a/core/tests/ts-integration/src/system.ts +++ b/core/tests/ts-integration/src/system.ts @@ -9,6 +9,7 @@ const DIAMOND_UPGRADE_INIT_ABI = new ethers.utils.Interface( const DIAMOND_CUT_FACET_ABI = new ethers.utils.Interface( require(`${L1_CONTRACTS_FOLDER}/zksync/facets/DiamondCut.sol/DiamondCutFacet.json`).abi ); + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 0b9fba72e856..5dbb7b8fe896 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -68,6 +68,15 @@ export class TestMaster { this.mainWallet = new zksync.Wallet(suiteWalletPK, this.l2Provider, this.l1Provider); } + /** + * Returns whether the network is localhost + * + * @returns `true` if the test suite is run on localhost and `false` otherwise. + */ + isLocalHost(): boolean { + return this.env.network == 'localhost'; + } + /** * Returns an instance of the `TestMaster` initialized for the specified suite file. 
* diff --git a/core/tests/ts-integration/tests/api/explorer.test.ts b/core/tests/ts-integration/tests/api/explorer.test.ts index 695bd75e2f5a..c2b17c8c47ff 100644 --- a/core/tests/ts-integration/tests/api/explorer.test.ts +++ b/core/tests/ts-integration/tests/api/explorer.test.ts @@ -2,7 +2,13 @@ import { TestMaster } from '../../src/index'; import * as zksync from 'zksync-web3'; import * as ethers from 'ethers'; import fetch from 'node-fetch'; -import { anyTransaction, deployContract, getContractSource, getTestContract } from '../../src/helpers'; +import { + anyTransaction, + deployContract, + getContractSource, + getTestContract, + waitForNewL1Batch +} from '../../src/helpers'; import { sleep } from 'zksync-web3/build/src/utils'; import { IERC20MetadataFactory } from 'zksync-web3/build/typechain'; import { extractFee } from '../../src/modifiers/balance-checker'; @@ -94,6 +100,58 @@ describe('Tests for the Explorer API', () => { } }); + test('Should test /l1_batches endpoint', async () => { + if (testMaster.isFastMode()) { + // This test requires a new L1 batch to be created, which may be very time consuming on stage. + return; + } + + // To ensure that the newest batch is not verified yet, we're sealing a new batch. + await waitForNewL1Batch(alice); + + const l1BatchesResponse = await query('/l1_batches', { direction: 'older', limit: '1' }); + expect(l1BatchesResponse).toHaveLength(1); + const apiL1Batch = l1BatchesResponse[0]; + expect(apiL1Batch).toMatchObject({ + number: expect.any(Number), + l1TxCount: expect.any(Number), + l2TxCount: expect.any(Number), + status: 'sealed', + timestamp: expect.any(Number) + }); + + // Sanity checks for the values we can't control. + expect(apiL1Batch.l1TxCount).toBeGreaterThanOrEqual(0); + expect(apiL1Batch.l2TxCount).toBeGreaterThanOrEqual(0); + expectTimestampToBeSane(apiL1Batch.timestamp); + + // Now try to find the same batch using the "newer" query. 
+ const newL1BatchesResponse = await query('/l1_batches', { + from: (apiL1Batch.number - 1).toString(), + direction: 'newer', + limit: '1' + }); + expect(newL1BatchesResponse).toHaveLength(1); + const apiL1BatchCopy = newL1BatchesResponse[0]; + // Response should be the same. + expect(apiL1BatchCopy).toEqual(apiL1Batch); + + // Finally, in the long mode also check, that once l1 batch becomes finalized, status also changes + // in the explorer API. + if (!testMaster.isFastMode()) { + await waitFor(async () => { + const verifiedApiL1Batch = ( + await query('/l1_batches', { + from: (apiL1Batch.number - 1).toString(), + direction: 'newer', + limit: '1' + }) + )[0]; + return verifiedApiL1Batch.status == 'verified'; + }, 'L1 batch was not verified'); + } + }); + test('Should test /block endpoint', async () => { // Send the transaction to query block data about. const tx = await anyTransaction(alice); @@ -101,11 +159,18 @@ describe('Tests for the Explorer API', () => { const apiBlock = await query(`/block/${tx.blockNumber}`); expect(apiBlock).toMatchObject({ number: expect.any(Number), + l1BatchNumber: expect.any(Number), l1TxCount: expect.any(Number), l2TxCount: expect.any(Number), rootHash: expect.stringMatching(HASH_REGEX), status: expect.stringMatching(/sealed|verified/), - timestamp: expect.any(Number) + timestamp: expect.any(Number), + baseSystemContractsHashes: { + bootloader: expect.stringMatching(HASH_REGEX), + default_aa: expect.stringMatching(HASH_REGEX) + }, + l1GasPrice: expect.any(Number), + l2FairGasPrice: expect.any(Number) }); expect(apiBlock.number).toEqual(tx.blockNumber); expect(apiBlock.rootHash).toEqual(tx.blockHash); @@ -128,6 +193,7 @@ describe('Tests for the Explorer API', () => { }, 'Block was not verified'); expect(verifiedBlock).toEqual({ number: expect.any(Number), + l1BatchNumber: expect.any(Number), l1TxCount: expect.any(Number), l2TxCount: expect.any(Number), rootHash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), @@ -138,11 +204,82 @@ 
describe('Tests for the Explorer API', () => { proveTxHash: expect.stringMatching(HASH_REGEX), provenAt: expect.stringMatching(DATE_REGEX), executeTxHash: expect.stringMatching(HASH_REGEX), - executedAt: expect.stringMatching(DATE_REGEX) + executedAt: expect.stringMatching(DATE_REGEX), + baseSystemContractsHashes: { + bootloader: expect.stringMatching(HASH_REGEX), + default_aa: expect.stringMatching(HASH_REGEX) + }, + l1GasPrice: expect.any(Number), + l2FairGasPrice: expect.any(Number) }); } }); + test('Should test /l1_batch endpoint', async () => { + if (testMaster.isFastMode()) { + // This test requires a new L1 batch to be created, which may be very time consuming on stage. + return; + } + + // Send the transaction to query l1 batch data about. + const tx = await waitForNewL1Batch(alice); + + const apiL1Batch = await query(`/l1_batch/${tx.l1BatchNumber}`); + expect(apiL1Batch).toMatchObject({ + number: expect.any(Number), + l1TxCount: expect.any(Number), + l2TxCount: expect.any(Number), + status: expect.stringMatching(/sealed|verified/), + timestamp: expect.any(Number), + baseSystemContractsHashes: { + bootloader: expect.stringMatching(HASH_REGEX), + default_aa: expect.stringMatching(HASH_REGEX) + }, + l1GasPrice: expect.any(Number), + l2FairGasPrice: expect.any(Number) + }); + expect(apiL1Batch.number).toEqual(tx.l1BatchNumber); + expect(apiL1Batch.l1TxCount).toBeGreaterThanOrEqual(0); + expect(apiL1Batch.l2TxCount).toBeGreaterThanOrEqual(1); // We know that at least 1 tx is included there. + expectTimestampToBeSane(apiL1Batch.timestamp); + + // Check that L1 transaction count can also be non-zero. + const l1Tx = await alice.deposit({ token: zksync.utils.ETH_ADDRESS, amount: 1 }).then((tx) => tx.wait()); + // Wait for l1 batch to be sealed. 
+ await waitForNewL1Batch(alice); + const l1TxReceipt = await alice.provider.getTransactionReceipt(l1Tx.transactionHash); + + const l1BatchWithL1Tx = await query(`/l1_batch/${l1TxReceipt.l1BatchNumber}`); + expect(l1BatchWithL1Tx.l1TxCount).toBeGreaterThanOrEqual(1); + + // Wait until the block is verified and check that the required fields are set. + let verifiedL1Batch = null; + await waitFor(async () => { + verifiedL1Batch = await query(`/l1_batch/${tx.l1BatchNumber}`); + return verifiedL1Batch.status == 'verified'; + }, 'Block was not verified'); + expect(verifiedL1Batch).toEqual({ + number: expect.any(Number), + l1TxCount: expect.any(Number), + l2TxCount: expect.any(Number), + rootHash: expect.stringMatching(/^0x[\da-fA-F]{64}$/), + status: 'verified', + timestamp: expect.any(Number), + commitTxHash: expect.stringMatching(HASH_REGEX), + committedAt: expect.stringMatching(DATE_REGEX), + proveTxHash: expect.stringMatching(HASH_REGEX), + provenAt: expect.stringMatching(DATE_REGEX), + executeTxHash: expect.stringMatching(HASH_REGEX), + executedAt: expect.stringMatching(DATE_REGEX), + baseSystemContractsHashes: { + bootloader: expect.stringMatching(HASH_REGEX), + default_aa: expect.stringMatching(HASH_REGEX) + }, + l1GasPrice: expect.any(Number), + l2FairGasPrice: expect.any(Number) + }); + }); + test('Should test /account endpoint for an EOA', async () => { // Check response for the empty account. const newEoa = testMaster.newEmptyAccount(); @@ -295,6 +432,24 @@ describe('Tests for the Explorer API', () => { type: tx.type }); + // Perform L1 batch-related checks in the long mode only. 
+ if (!testMaster.isFastMode()) { + const tx = await waitForNewL1Batch(alice); + const response: any = await query('/transactions', { + l1BatchNumber: tx.l1BatchNumber.toString(), + limit: '100', + direction: 'older' + }); + expect(response).toEqual({ + total: expect.any(Number), + list: expect.anything() + }); + expect(response.total).toBeGreaterThanOrEqual(1); + + const apiTx = response.list.find((apiTx: any) => apiTx.transactionHash == tx.transactionHash); + expect(apiTx).toBeDefined(); + } + // Check other query parameters combinations const backwards = await query('/transactions', { limit: '1', @@ -309,18 +464,29 @@ describe('Tests for the Explorer API', () => { }); expect(forward.list.length).toEqual(1); - const account = await query('/transactions', { - limit: '1', + const tom = testMaster.newEmptyAccount(); + await alice.transfer({ to: tom.address, amount }).then((tx) => tx.wait()); + + // Alice sent at least 2 txs: to Bob and to Tom. + let accountTxs = await query('/transactions', { + limit: '2', direction: 'older', - account: alice.address + accountAddress: alice.address }); - expect(account.list.length).toEqual(1); + expect(accountTxs.list.length).toEqual(2); + // Tom received only 1 tx from Alice. + accountTxs = await query('/transactions', { + limit: '10', + direction: 'older', + accountAddress: tom.address + }); + expect(accountTxs.list.length).toEqual(1); // Invariant: ERC20 tokens are distributed during init, so it must have transactions. 
const contract = await query('/transactions', { limit: '1', direction: 'older', - contract: erc20.l2Address + contractAddress: erc20.l2Address }); expect(contract.list.length).toEqual(1); }); @@ -400,12 +566,13 @@ describe('Tests for the Explorer API', () => { const requestBody = { contractAddress: counterContract.address, - contractName: 'Counter', + contractName: 'contracts/counter/counter.sol:Counter', sourceCode: getContractSource('counter/counter.sol'), - compilerZksolcVersion: 'v1.3.1', + compilerZksolcVersion: 'v1.3.7', compilerSolcVersion: '0.8.16', optimizationUsed: true, - constructorArguments + constructorArguments, + isSystem: true }; let requestId = await query('/contract_verification', undefined, requestBody); @@ -428,8 +595,8 @@ describe('Tests for the Explorer API', () => { const standardJsonInput = { language: 'Solidity', sources: { - 'create.sol': { content: getContractSource('create/create.sol') }, - 'Foo.sol': { content: getContractSource('create/Foo.sol') } + 'contracts/create/create.sol': { content: getContractSource('create/create.sol') }, + 'contracts/create/Foo.sol': { content: getContractSource('create/Foo.sol') } }, settings: { optimizer: { enabled: true } @@ -440,13 +607,14 @@ describe('Tests for the Explorer API', () => { const requestBody = { contractAddress: importContract.address, - contractName: 'create.sol:Import', + contractName: 'contracts/create/create.sol:Import', sourceCode: standardJsonInput, codeFormat: 'solidity-standard-json-input', - compilerZksolcVersion: 'v1.3.1', + compilerZksolcVersion: 'v1.3.7', compilerSolcVersion: '0.8.16', optimizationUsed: true, - constructorArguments + constructorArguments, + isSystem: true }; let requestId = await query('/contract_verification', undefined, requestBody); diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index dbeb3883c9e2..c800884c8783 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ 
b/core/tests/ts-integration/tests/api/web3.test.ts @@ -594,6 +594,11 @@ describe('web3 API compatibility tests', () => { expect(thrown).toBeTruthy(); }); + test('Should throw error for estimate gas for account with balance < tx.value', async () => { + let poorBob = testMaster.newEmptyAccount(); + expect(poorBob.estimateGas({ value: 1, to: alice.address })).toBeRejected('insufficient balance for transfer'); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index de26528954cc..103be2c468a7 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -24,7 +24,8 @@ const contracts = { ...getTestContract('Import'), factoryDep: getTestContract('Foo').bytecode }, - context: getTestContract('Context') + context: getTestContract('Context'), + error: getTestContract('SimpleRequire') }; describe('Smart contract behavior checks', () => { @@ -97,8 +98,6 @@ describe('Smart contract behavior checks', () => { const infiniteLoop = await deployContract(alice, contracts.infinite, []); // Test eth_call first - // await expect(infiniteLoop.callStatic.infiniteLoop()).toBeRejected('cannot estimate transaction: out of gas'); - // ...and then an actual transaction await expect(infiniteLoop.infiniteLoop({ gasLimit: 1_000_000 })).toBeReverted([]); }); @@ -196,6 +195,13 @@ describe('Smart contract behavior checks', () => { }); }); + test('Should return correct error during fee estimation', async () => { + const errorContract = await deployContract(alice, contracts.error, []); + + await expect(errorContract.estimateGas.require_long()).toBeRejected('longlonglong'); + await expect(errorContract.require_long()).toBeRejected('longlonglong'); + }); + test('Should check block properties for tx execution', async () => { if (testMaster.isFastMode()) { // This test requires a new L1 batch to be created, which 
may be very time consuming on stage. @@ -257,6 +263,38 @@ describe('Smart contract behavior checks', () => { ).toBeAccepted([]); }); + test('Should successfully publish a large packable bytecode', async () => { + // The rough length of the packed bytecode should be 350_000 / 4 = 87500, + // which should fit into a batch + const BYTECODE_LEN = 350_016 + 32; // +32 to ensure validity of the bytecode + + // Our current packing algorithm uses 8-byte chunks for dictionary and + // so in order to make an effectively-packable bytecode, we need to have bytecode + // consist of the same 2 types of 8-byte chunks. + // Note, that instead of having 1 type of 8-byte chunks, we need 2 in order to have + // a unique bytecode for each test run. + const CHUNK_TYPE_1 = '00000000'; + const CHUNK_TYPE_2 = 'ffffffff'; + + let bytecode = '0x'; + while (bytecode.length < BYTECODE_LEN * 2 + 2) { + if (Math.random() < 0.5) { + bytecode += CHUNK_TYPE_1; + } else { + bytecode += CHUNK_TYPE_2; + } + } + + await expect( + alice.sendTransaction({ + to: alice.address, + customData: { + factoryDeps: [bytecode] + } + }) + ).toBeAccepted([]); + }); + afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/custom-account.test.ts b/core/tests/ts-integration/tests/custom-account.test.ts index 8df5a93b2ea8..a488215fe2c1 100644 --- a/core/tests/ts-integration/tests/custom-account.test.ts +++ b/core/tests/ts-integration/tests/custom-account.test.ts @@ -156,6 +156,86 @@ describe('Tests for the custom account behavior', () => { await expect(sendCustomAccountTransaction(customAATx, alice.provider, customAccount.address)).toBeAccepted([]); }); + test('API should reject validation that takes too many computational ergs', async () => { + const violateStorageRules = false; + const badCustomAccount = await deployContract( + alice, + contracts.customAccount, + [violateStorageRules], + 'createAccount' + ); + badCustomAccount.connect(alice); + + // Fund the account. 
+ await alice + .transfer({ + to: badCustomAccount.address, + amount: ETH_PER_CUSTOM_ACCOUNT + }) + .then((tx) => tx.wait()); + await alice + .transfer({ + to: badCustomAccount.address, + token: erc20Address, + amount: TRANSFER_AMOUNT + }) + .then((tx) => tx.wait()); + + // Set flag to do many calculations during validation. + const validationGasLimit = +process.env.CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT!; + await badCustomAccount.setGasToSpent(validationGasLimit).then((tx: any) => tx.wait()); + + let tx = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + await expect(sendCustomAccountTransaction(tx, alice.provider, badCustomAccount.address)).toBeRejected( + 'Violated validation rules: Took too many computational gas' + ); + }); + + test('State keeper should reject validation that takes too many computational ergs', async () => { + const violateStorageRules = false; + const badCustomAccount = await deployContract( + alice, + contracts.customAccount, + [violateStorageRules], + 'createAccount' + ); + badCustomAccount.connect(alice); + + // Fund the account. + await alice + .transfer({ + to: badCustomAccount.address, + amount: ETH_PER_CUSTOM_ACCOUNT + }) + .then((tx) => tx.wait()); + await alice + .transfer({ + to: badCustomAccount.address, + token: erc20Address, + amount: TRANSFER_AMOUNT + }) + .then((tx) => tx.wait()); + + const transfer = await erc20.populateTransaction.transfer(alice.address, TRANSFER_AMOUNT); + const nonce = await alice.provider.getTransactionCount(badCustomAccount.address); + + // Create a *promise* that would await for the rejection. + // Even though we use `toBeReverted` matcher, we'll check that it's actually rejected based on the nonce. + // However, we use `await` on the `sendTransaction` to make sure that tx is past the API server checks. 
+ const rejectionCheckPromise = expect( + await sendCustomAccountTransaction(transfer, alice.provider, badCustomAccount.address, undefined, nonce + 1) + ).toBeReverted(); + + // Increase nonce and set flag to do many calculations during validation. + const validationGasLimit = +process.env.CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT!; + const tx = await badCustomAccount.populateTransaction.setGasToSpent(validationGasLimit); + await expect( + sendCustomAccountTransaction(tx, alice.provider, badCustomAccount.address, undefined, nonce) + ).toBeAccepted(); + + await rejectionCheckPromise; + }); + afterAll(async () => { await testMaster.deinitialize(); }); @@ -167,7 +247,8 @@ async function sendCustomAccountTransaction( tx: ethers.PopulatedTransaction, web3Provider: zksync.Provider, accountAddress: string, - customSignature?: Uint8Array + customSignature?: Uint8Array, + nonce?: number ) { const gasLimit = await web3Provider.estimateGas({ ...tx, @@ -179,7 +260,7 @@ async function sendCustomAccountTransaction( tx.gasPrice = gasPrice; tx.chainId = parseInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!, 10); tx.value = ethers.BigNumber.from(0); - tx.nonce = await web3Provider.getTransactionCount(accountAddress); + tx.nonce = nonce ?? 
(await web3Provider.getTransactionCount(accountAddress)); tx.type = 113; tx.from = accountAddress; diff --git a/core/tests/ts-integration/tests/erc20.test.ts b/core/tests/ts-integration/tests/erc20.test.ts index d56d2ba9140b..b5ffb8f24537 100644 --- a/core/tests/ts-integration/tests/erc20.test.ts +++ b/core/tests/ts-integration/tests/erc20.test.ts @@ -50,7 +50,7 @@ describe('ERC20 contract checks', () => { const l2BalanceChange = await shouldChangeTokenBalances(tokenDetails.l2Address, [ { wallet: alice, change: amount } ]); - const feeCheck = await shouldOnlyTakeFee(alice); + const feeCheck = await shouldOnlyTakeFee(alice, true); await expect( alice.deposit({ token: tokenDetails.l1Address, @@ -176,6 +176,7 @@ describe('ERC20 contract checks', () => { to: ethers.constants.AddressZero, token: tokenDetails.l1Address, amount, + l2GasLimit: 5_000_000, // Setting the limit manually to avoid estimation for L1->L2 transaction approveERC20: true }); const l1Receipt = await depositHandle.waitL1Commit(); diff --git a/core/tests/ts-integration/tests/ether.test.ts b/core/tests/ts-integration/tests/ether.test.ts index f875f554b57c..8cecaff17c5c 100644 --- a/core/tests/ts-integration/tests/ether.test.ts +++ b/core/tests/ts-integration/tests/ether.test.ts @@ -31,11 +31,29 @@ describe('ETH token checks', () => { const l1EthBalanceBefore = await alice.getBalanceL1(); // No need to check fee as the L1->L2 are free for now const l2ethBalanceChange = await shouldChangeETHBalances([{ wallet: alice, change: amount }], { - noAutoFeeCheck: true + l1ToL2: true }); + + const l2GasLimit = await zksync.utils.estimateDefaultBridgeDepositL2Gas( + alice.providerL1!, + alice.provider, + ETH_ADDRESS, + amount, + alice.address, + alice.address + ); + const gasPerPubdataByte = zksync.utils.REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; + const expectedL2Costs = await alice.getBaseCost({ + gasLimit: l2GasLimit, + gasPerPubdataByte, + gasPrice: await gasPrice + }); + const depositOp = alice.deposit({ token: 
ETH_ADDRESS, amount, + gasPerPubdataByte, + l2GasLimit, overrides: { gasPrice } @@ -44,7 +62,10 @@ describe('ETH token checks', () => { const depositFee = await depositOp .then((op) => op.waitL1Commit()) - .then((receipt) => receipt.gasUsed.mul(receipt.effectiveGasPrice)); + .then(async (receipt) => { + const l1GasFee = receipt.gasUsed.mul(receipt.effectiveGasPrice); + return l1GasFee.add(expectedL2Costs); + }); const l1EthBalanceAfter = await alice.getBalanceL1(); expect(l1EthBalanceBefore.sub(depositFee).sub(l1EthBalanceAfter)).bnToBeEq(amount); }); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts new file mode 100644 index 000000000000..a4f0b0e9628d --- /dev/null +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -0,0 +1,217 @@ +/** + * This suite contains tests displaying prices for some of the most common operations under various L1 gas prices. + * + * IMPORTANT: this test affects the internal state of the server and so + * it should never be run in parallel with other tests. + * + * Locally, for maximal representation the test should be run with batches of size 1. + * However, we do not want to overload the CI for such purposes and so the job of the CI would be to make + * sure that the test is maintained does not get broken. + * + */ +import * as utils from 'zk/build/utils'; +import { TestMaster } from '../src/index'; + +import * as zksync from 'zksync-web3'; +import { BigNumber, ethers } from 'ethers'; +import { Token } from '../src/types'; + +// Unless `RUN_FEE_TEST` is provided, skip the test suit +const testFees = process.env.RUN_FEE_TEST ? describe : describe.skip; + +// The L1 gas prices under which the test will be conducted. +// For CI we use only 2 gas prices to not slow it down too much. +const L1_GAS_PRICES_TO_TEST = process.env.CI + ? 
[ + 5_000_000_000, // 5 gwei + 10_000_000_000 // 10 gwei + ] + : [ + 1_000_000_000, // 1 gwei + 5_000_000_000, // 5 gwei + 10_000_000_000, // 10 gwei + 25_000_000_000, // 25 gwei + 50_000_000_000, // 50 gwei + 100_000_000_000, // 100 gwei + 200_000_000_000, // 200 gwei + 400_000_000_000, // 400 gwei + 800_000_000_000, // 800 gwei + 1_000_000_000_000, // 1000 gwei + 2_000_000_000_000 // 2000 gwei + ]; + +testFees('Test fees', () => { + let testMaster: TestMaster; + let alice: zksync.Wallet; + + let tokenDetails: Token; + let aliceErc20: zksync.Contract; + + beforeAll(() => { + testMaster = TestMaster.getInstance(__filename); + alice = testMaster.mainAccount(); + + tokenDetails = testMaster.environment().erc20Token; + aliceErc20 = new ethers.Contract(tokenDetails.l1Address, zksync.utils.IERC20, alice.ethWallet()); + }); + + test('Test fees', async () => { + const receiver = ethers.Wallet.createRandom().address; + + // Getting ETH price in gas. + const feeTestL1Receipt = await ( + await alice.ethWallet().sendTransaction({ + to: receiver, + value: BigNumber.from(1) + }) + ).wait(); + + const feeTestL1ReceiptERC20 = await ( + await alice.ethWallet().sendTransaction({ + to: aliceErc20.address, + data: aliceErc20.interface.encodeFunctionData('transfer', [receiver, BigNumber.from(1)]) + }) + ).wait(); + + let reports = ['ETH transfer:\n\n', 'ERC20 transfer:\n\n']; + for (const gasPrice of L1_GAS_PRICES_TO_TEST) { + reports = await appendResults( + alice, + [feeTestL1Receipt, feeTestL1ReceiptERC20], + // We always regenerate new addresses for transaction requests in order to estimate the cost for a new account + [ + { + to: ethers.Wallet.createRandom().address, + value: BigNumber.from(1) + }, + { + data: aliceErc20.interface.encodeFunctionData('transfer', [ + ethers.Wallet.createRandom().address, + BigNumber.from(1) + ]), + to: tokenDetails.l2Address + } + ], + gasPrice, + reports + ); + } + + await setInternalL1GasPrice(alice._providerL2(), undefined, true); + + 
console.log(`Full report: \n\n${reports.join('\n\n')}`); + }); + + afterAll(async () => { + await testMaster.deinitialize(); + }); +}); + +async function appendResults( + sender: zksync.Wallet, + originalL1Receipts: ethers.providers.TransactionReceipt[], + transactionRequests: ethers.providers.TransactionRequest[], + newL1GasPrice: number, + reports: string[] +): Promise { + await setInternalL1GasPrice(sender._providerL2(), newL1GasPrice.toString()); + + if (originalL1Receipts.length !== reports.length && originalL1Receipts.length !== transactionRequests.length) { + throw new Error('The array of receipts and reports have different length'); + } + + const results = []; + + for (let i = 0; i < originalL1Receipts.length; i++) { + const receipt = originalL1Receipts[i]; + const request = transactionRequests[i]; + const oldReport = reports[i]; + + results.push(await updateReport(sender, receipt, request, newL1GasPrice, oldReport)); + } + + return results; +} + +async function updateReport( + sender: zksync.Wallet, + l1Receipt: ethers.providers.TransactionReceipt, + transactionRequest: ethers.providers.TransactionRequest, + newL1GasPrice: number, + oldReport: string +): Promise { + const expectedL1Price = +ethers.utils.formatEther(l1Receipt.gasUsed.mul(newL1GasPrice)); + + const estimatedL2GasPrice = await sender.getGasPrice(); + const estimatedL2GasLimit = await sender.estimateGas(transactionRequest); + const estimatedPrice = estimatedL2GasPrice.mul(estimatedL2GasLimit); + + const balanceBefore = await sender.getBalance(); + await (await sender.sendTransaction(transactionRequest)).wait(); + const balanceAfter = await sender.getBalance(); + const balanceDiff = balanceBefore.sub(balanceAfter); + + const l2PriceAsNumber = +ethers.utils.formatEther(balanceDiff); + const l2EstimatedPriceAsNumber = +ethers.utils.formatEther(estimatedPrice); + + const gasReport = `Gas price ${newL1GasPrice / 1000000000} gwei: + L1 cost ${expectedL1Price}, + L2 estimated cost: 
${l2EstimatedPriceAsNumber} + Estimated Gain: ${expectedL1Price / l2EstimatedPriceAsNumber} + L2 cost: ${l2PriceAsNumber}, + Gain: ${expectedL1Price / l2PriceAsNumber}\n`; + console.log(gasReport); + + return oldReport + gasReport; +} + +async function killServerAndWaitForShutdown(provider: zksync.Provider) { + await utils.exec('pkill zksync_server'); + // Wait until it's really stopped. + let iter = 0; + while (iter < 30) { + try { + await provider.getBlockNumber(); + await utils.sleep(5); + iter += 1; + } catch (_) { + // When exception happens, we assume that server died. + return; + } + } + // It's going to panic anyway, since the server is a singleton entity, so better to exit early. + throw new Error("Server didn't stop after a kill request"); +} + +async function setInternalL1GasPrice(provider: zksync.Provider, newPrice?: string, disconnect?: boolean) { + // Make sure server isn't running. + try { + await killServerAndWaitForShutdown(provider); + } catch (_) {} + + // Run server in background. + let command = 'zk server --components api,tree,tree_lightweight,eth,data_fetcher,state_keeper'; + if (newPrice) { + command = `ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE=${newPrice} ${command}`; + } + const zkSyncServer = utils.background(command, 'ignore'); + + if (disconnect) { + zkSyncServer.unref(); + } + + // Server may need some time to recompile if it's a cold run, so wait for it. 
+ let iter = 0; + let mainContract; + while (iter < 30 && !mainContract) { + try { + mainContract = await provider.getMainContractAddress(); + } catch (_) { + await utils.sleep(5); + iter += 1; + } + } + if (!mainContract) { + throw new Error('Server did not start'); + } +} diff --git a/core/tests/ts-integration/tests/l1.test.ts b/core/tests/ts-integration/tests/l1.test.ts index 8eab00bec54c..44bd543519fe 100644 --- a/core/tests/ts-integration/tests/l1.test.ts +++ b/core/tests/ts-integration/tests/l1.test.ts @@ -52,7 +52,6 @@ describe('Tests for L1 behavior', () => { alice.requestExecute({ contractAddress: counterContract.address, calldata, - l2GasLimit: DEFAULT_L2_GAS_LIMIT, overrides: { gasPrice } @@ -69,7 +68,6 @@ describe('Tests for L1 behavior', () => { alice.requestExecute({ contractAddress: contextContract.address, calldata, - l2GasLimit: DEFAULT_L2_GAS_LIMIT, l2Value, overrides: { gasPrice @@ -143,7 +141,8 @@ describe('Tests for L1 behavior', () => { calldata: '0x', l2GasLimit: l2GasLimit + 1, overrides: { - gasPrice + gasPrice, + gasLimit: 600_000 } }); let thrown = false; @@ -167,6 +166,12 @@ describe('Tests for L1 behavior', () => { }); test('Should revert l1 tx with too many initial storage writes', async () => { + // This test sends a transaction that consumes a lot of L2 ergs and so may be too expensive for + // stage environment. That's why we only test it on the local environment (which includes CI). + if (!testMaster.isLocalHost()) { + return; + } + const contract = await deployContract(alice, contracts.writesAndMessages, []); // The circuit allows us to have ~4700 initial writes for an L1 batch. // We check that we will run out of gas if we do a bit smaller amount of writes. @@ -190,6 +195,12 @@ describe('Tests for L1 behavior', () => { }); test('Should revert l1 tx with too many repeated storage writes', async () => { + // This test sends a transaction that consumes a lot of L2 ergs and so may be too expensive for + // stage environment. 
That's why we only test it on the local environment (which includes CI). + if (!testMaster.isLocalHost()) { + return; + } + const contract = await deployContract(alice, contracts.writesAndMessages, []); // The circuit allows us to have ~7500 repeated writes for an L1 batch. // We check that we will run out of gas if we do a bit smaller amount of writes. @@ -231,6 +242,12 @@ describe('Tests for L1 behavior', () => { }); test('Should revert l1 tx with too many l2 to l1 messages', async () => { + // This test sends a transaction that consumes a lot of L2 ergs and so may be too expensive for + // stage environment. That's why we only test it on the local environment (which includes CI). + if (!testMaster.isLocalHost()) { + return; + } + const contract = await deployContract(alice, contracts.writesAndMessages, []); // The circuit allows us to have 512 L2->L1 logs for an L1 batch. // We check that we will run out of gas if we send a bit smaller amount of L2->L1 logs. @@ -254,6 +271,12 @@ describe('Tests for L1 behavior', () => { }); test('Should revert l1 tx with too big l2 to l1 message', async () => { + // This test sends a transaction that consumes a lot of L2 ergs and so may be too expensive for + // stage environment. That's why we only test it on the local environment (which includes CI). + if (!testMaster.isLocalHost()) { + return; + } + const contract = await deployContract(alice, contracts.writesAndMessages, []); const MAX_PUBDATA_PER_BLOCK = ethers.BigNumber.from(SYSTEM_CONFIG['MAX_PUBDATA_PER_BLOCK']); // We check that we will run out of gas if we send a bit @@ -314,60 +337,52 @@ function maxL2GasLimitForPriorityTxs(): number { // using binary search. 
let maxGasBodyLimit = +process.env.CONTRACTS_PRIORITY_TX_MAX_GAS_LIMIT!; - const overhead = 0; - // const overhead = getOverheadForTransaction( - // ethers.BigNumber.from(maxGasBodyLimit), - // ethers.BigNumber.from(zksync.utils.DEFAULT_GAS_PER_PUBDATA_LIMIT), - // // We can just pass 0 as `encodingLength` because `overheadForPublicData` and `overheadForGas` - // // will be greater than `overheadForLength` for large `gasLimit`. - // ethers.BigNumber.from(0) - // ); + const overhead = getOverheadForTransaction( + ethers.BigNumber.from(maxGasBodyLimit), + ethers.BigNumber.from(zksync.utils.REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT), + // We can just pass 0 as `encodingLength` because `overheadForPublicData` and `overheadForGas` + // will be greater than `overheadForLength` for large `gasLimit`. + ethers.BigNumber.from(0) + ); return maxGasBodyLimit + overhead; } -// function getOverheadForTransaction( -// bodyGasLimit: ethers.BigNumber, -// gasPricePerPubdata: ethers.BigNumber, -// encodingLength: ethers.BigNumber -// ): number { -// const BLOCK_OVERHEAD_L2_GAS = ethers.BigNumber.from(SYSTEM_CONFIG['BLOCK_OVERHEAD_L2_GAS']); -// const L1_GAS_PER_PUBDATA_BYTE = ethers.BigNumber.from(SYSTEM_CONFIG['L1_GAS_PER_PUBDATA_BYTE']); -// const BLOCK_OVERHEAD_L1_GAS = ethers.BigNumber.from(SYSTEM_CONFIG['BLOCK_OVERHEAD_L1_GAS']); -// const BLOCK_OVERHEAD_PUBDATA = BLOCK_OVERHEAD_L1_GAS.div(L1_GAS_PER_PUBDATA_BYTE); - -// const MAX_TRANSACTIONS_IN_BLOCK = ethers.BigNumber.from(SYSTEM_CONFIG['MAX_TRANSACTIONS_IN_BLOCK']); -// const BOOTLOADER_TX_ENCODING_SPACE = ethers.BigNumber.from(SYSTEM_CONFIG['BOOTLOADER_TX_ENCODING_SPACE']); -// const MAX_PUBDATA_PER_BLOCK = ethers.BigNumber.from(SYSTEM_CONFIG['MAX_PUBDATA_PER_BLOCK']); -// const L2_TX_MAX_GAS_LIMIT = ethers.BigNumber.from(SYSTEM_CONFIG['L2_TX_MAX_GAS_LIMIT']); - -// const maxBlockOverhead = BLOCK_OVERHEAD_L2_GAS.add(BLOCK_OVERHEAD_PUBDATA.mul(gasPricePerPubdata)); - -// // The overhead from taking up the transaction's slot 
-// const txSlotOverhead = ceilDiv(maxBlockOverhead, MAX_TRANSACTIONS_IN_BLOCK); -// let blockOverheadForTransaction = txSlotOverhead; - -// // The overhead for occupying the bootloader memory can be derived from encoded_len -// const overheadForLength = ceilDiv(encodingLength.mul(maxBlockOverhead), BOOTLOADER_TX_ENCODING_SPACE); -// if (overheadForLength.gt(blockOverheadForTransaction)) { -// blockOverheadForTransaction = overheadForLength; -// } - -// // The overhead for possible published public data -// let maxPubdataInTx = ceilDiv(bodyGasLimit, gasPricePerPubdata); -// let overheadForPublicData = ceilDiv(maxPubdataInTx.mul(maxBlockOverhead), MAX_PUBDATA_PER_BLOCK); -// if (overheadForPublicData.gt(blockOverheadForTransaction)) { -// blockOverheadForTransaction = overheadForPublicData; -// } - -// // The overhead for gas that could be used to use single-instance circuits -// let overheadForSingleInstanceCircuits = ceilDiv(bodyGasLimit.mul(maxBlockOverhead), L2_TX_MAX_GAS_LIMIT); -// if (overheadForSingleInstanceCircuits.gt(blockOverheadForTransaction)) { -// blockOverheadForTransaction = overheadForSingleInstanceCircuits; -// } - -// return blockOverheadForTransaction.toNumber(); -// } - -// function ceilDiv(a: ethers.BigNumber, b: ethers.BigNumber): ethers.BigNumber { -// return a.add(b.sub(1)).div(b); -// } +function getOverheadForTransaction( + bodyGasLimit: ethers.BigNumber, + gasPricePerPubdata: ethers.BigNumber, + encodingLength: ethers.BigNumber +): number { + const BLOCK_OVERHEAD_L2_GAS = ethers.BigNumber.from(SYSTEM_CONFIG['BLOCK_OVERHEAD_L2_GAS']); + const L1_GAS_PER_PUBDATA_BYTE = ethers.BigNumber.from(SYSTEM_CONFIG['L1_GAS_PER_PUBDATA_BYTE']); + const BLOCK_OVERHEAD_L1_GAS = ethers.BigNumber.from(SYSTEM_CONFIG['BLOCK_OVERHEAD_L1_GAS']); + const BLOCK_OVERHEAD_PUBDATA = BLOCK_OVERHEAD_L1_GAS.div(L1_GAS_PER_PUBDATA_BYTE); + + const MAX_TRANSACTIONS_IN_BLOCK = ethers.BigNumber.from(SYSTEM_CONFIG['MAX_TRANSACTIONS_IN_BLOCK']); + const 
BOOTLOADER_TX_ENCODING_SPACE = ethers.BigNumber.from(SYSTEM_CONFIG['BOOTLOADER_TX_ENCODING_SPACE']); + // const MAX_PUBDATA_PER_BLOCK = ethers.BigNumber.from(SYSTEM_CONFIG['MAX_PUBDATA_PER_BLOCK']); + const L2_TX_MAX_GAS_LIMIT = ethers.BigNumber.from(SYSTEM_CONFIG['L2_TX_MAX_GAS_LIMIT']); + + const maxBlockOverhead = BLOCK_OVERHEAD_L2_GAS.add(BLOCK_OVERHEAD_PUBDATA.mul(gasPricePerPubdata)); + + // The overhead from taking up the transaction's slot + const txSlotOverhead = ceilDiv(maxBlockOverhead, MAX_TRANSACTIONS_IN_BLOCK); + let blockOverheadForTransaction = txSlotOverhead; + + // The overhead for occupying the bootloader memory can be derived from encoded_len + const overheadForLength = ceilDiv(encodingLength.mul(maxBlockOverhead), BOOTLOADER_TX_ENCODING_SPACE); + if (overheadForLength.gt(blockOverheadForTransaction)) { + blockOverheadForTransaction = overheadForLength; + } + + // The overhead for gas that could be used to use single-instance circuits + let overheadForSingleInstanceCircuits = ceilDiv(bodyGasLimit.mul(maxBlockOverhead), L2_TX_MAX_GAS_LIMIT); + if (overheadForSingleInstanceCircuits.gt(blockOverheadForTransaction)) { + blockOverheadForTransaction = overheadForSingleInstanceCircuits; + } + + return blockOverheadForTransaction.toNumber(); +} + +function ceilDiv(a: ethers.BigNumber, b: ethers.BigNumber): ethers.BigNumber { + return a.add(b.sub(1)).div(b); +} diff --git a/core/tests/upgrade-test/package.json b/core/tests/upgrade-test/package.json new file mode 100644 index 000000000000..ca3ed7acd6ae --- /dev/null +++ b/core/tests/upgrade-test/package.json @@ -0,0 +1,38 @@ +{ + "name": "upgrade-test", + "version": "1.0.0", + "license": "MIT", + "mocha": { + "timeout": 240000, + "exit": true, + "color": false, + "slow": 0, + "require": [ + "ts-node/register", + "mocha-steps" + ] + }, + "scripts": { + "upgrade-test": "zk f mocha tests/upgrade.test.ts" + }, + "devDependencies": { + "@types/chai": "^4.2.21", + "@types/mocha": "^8.2.3", + 
"@types/mocha-steps": "^1.3.0", + "@types/node": "^14.14.5", + "@types/node-fetch": "^2.5.7", + "chai": "^4.3.4", + "chai-as-promised": "^7.1.1", + "ethereumjs-abi": "^0.6.8", + "ethers": "~5.7.0", + "mocha": "^9.0.2", + "mocha-steps": "^1.3.0", + "node-fetch": "^2.6.1", + "ts-node": "^10.1.0", + "typescript": "^4.3.5", + "zksync-web3": "link:../../../sdk/zksync-web3.js" + }, + "dependencies": { + "prettier": "^2.3.2" + } +} diff --git a/core/tests/upgrade-test/tests/tester.ts b/core/tests/upgrade-test/tests/tester.ts new file mode 100644 index 000000000000..c490c9062f21 --- /dev/null +++ b/core/tests/upgrade-test/tests/tester.ts @@ -0,0 +1,69 @@ +import * as ethers from 'ethers'; +import * as zkweb3 from 'zksync-web3'; +import * as fs from 'fs'; +import * as path from 'path'; + +type Network = string; + +export class Tester { + public runningFee: Map; + constructor( + public network: Network, + public ethProvider: ethers.providers.Provider, + public ethWallet: ethers.Wallet, + public syncWallet: zkweb3.Wallet, + public web3Provider: zkweb3.Provider + ) { + this.runningFee = new Map(); + } + + // prettier-ignore + static async init(network: Network) { + const ethProvider = new ethers.providers.JsonRpcProvider(process.env.L1_RPC_ADDRESS || process.env.ETH_CLIENT_WEB3_URL); + + let ethWallet; + if (network == 'localhost') { + ethProvider.pollingInterval = 100; + + const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant`); + const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + ethWallet = ethers.Wallet.fromMnemonic( + ethTestConfig.test_mnemonic as string, + "m/44'/60'/0'/0/0" + ) + } + else { + ethWallet = new ethers.Wallet(process.env.MASTER_WALLET_PK!); + } + ethWallet = ethWallet.connect(ethProvider); + const web3Provider = new zkweb3.Provider(process.env.ZKSYNC_WEB3_API_URL || "http://localhost:3050"); + web3Provider.pollingInterval = 100; // It's OK to keep it low even on stage. 
+ const syncWallet = new zkweb3.Wallet(ethWallet.privateKey, web3Provider, ethProvider); + + + // Since some tx may be pending on stage, we don't want to get stuck because of it. + // In order to not get stuck transactions, we manually cancel all the pending txs. + const latestNonce = await ethWallet.getTransactionCount('latest'); + const pendingNonce = await ethWallet.getTransactionCount('pending'); + const cancellationTxs = []; + for (let nonce = latestNonce; nonce != pendingNonce; nonce++) { + // For each transaction to override it, we need to provide greater fee. + // We would manually provide a value high enough (for a testnet) to be both valid + // and higher than the previous one. It's OK as we'll only be charged for the bass fee + // anyways. We will also set the miner's tip to 5 gwei, which is also much higher than the normal one. + const maxFeePerGas = ethers.utils.parseEther("0.00000025"); // 250 gwei + const maxPriorityFeePerGas = ethers.utils.parseEther("0.000000005"); // 5 gwei + cancellationTxs.push(ethWallet.sendTransaction({ to: ethWallet.address, nonce, maxFeePerGas, maxPriorityFeePerGas }).then((tx) => tx.wait())); + } + if (cancellationTxs.length > 0) { + await Promise.all(cancellationTxs); + console.log(`Canceled ${cancellationTxs.length} pending transactions`); + } + + return new Tester(network, ethProvider, ethWallet, syncWallet, web3Provider); + } + + emptyWallet() { + return zkweb3.Wallet.createRandom().connect(this.web3Provider).connectToL1(this.ethProvider); + } +} diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts new file mode 100644 index 000000000000..bd59f4f821ec --- /dev/null +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -0,0 +1,188 @@ +import * as utils from 'zk/build/utils'; +import { Tester } from './tester'; +import * as zkweb3 from 'zksync-web3'; +import { BigNumber, Contract, ethers, Wallet } from 'ethers'; +import { expect } from 'chai'; +import { hashBytecode } 
from 'zksync-web3/build/src/utils'; +import fs from 'fs'; +import path from 'path'; +import { IZkSyncFactory } from 'zksync-web3/build/typechain'; +import { TransactionResponse } from 'zksync-web3/build/src/types'; + +const depositAmount = ethers.utils.parseEther('0.001'); + +describe('Upgrade test', function () { + let tester: Tester; + let alice: zkweb3.Wallet; + let mainContract: Contract; + let bootloaderHash: string; + + before('create test wallet', async () => { + tester = await Tester.init(process.env.CHAIN_ETH_NETWORK || 'localhost'); + alice = tester.emptyWallet(); + }); + + step('run server and execute some transactions', async () => { + // Make sure server isn't running. + try { + await utils.exec('pkill zksync_server'); + // It may take some time for witness generator to stop. + await utils.sleep(120); + } catch (_) {} + + // Set 1000 seconds deadline for `CommitBlock` operation. + process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE = '1000'; + process.env.CHAIN_STATE_KEEPER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = '1'; + // Run server in background. + utils.background( + `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper > server_logs.txt` + ); + // Server may need some time to recompile if it's a cold run, so wait for it. 
+ let iter = 0; + while (iter < 30 && !mainContract) { + try { + mainContract = await tester.syncWallet.getMainContract(); + } catch (_) { + await utils.sleep(5); + } + iter += 1; + } + if (!mainContract) { + throw new Error('Server did not start'); + } + let blocksCommitted = await mainContract.getTotalBlocksCommitted(); + + const initialL1BatchNumber = await tester.web3Provider.getL1BatchNumber(); + + const firstDepositHandle = await tester.syncWallet.deposit({ + token: zkweb3.utils.ETH_ADDRESS, + amount: depositAmount, + to: alice.address + }); + await firstDepositHandle.wait(); + while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { + await utils.sleep(1); + } + + const secondDepositHandle = await tester.syncWallet.deposit({ + token: zkweb3.utils.ETH_ADDRESS, + amount: depositAmount, + to: alice.address + }); + await secondDepositHandle.wait(); + while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1) { + await utils.sleep(1); + } + + const balance = await alice.getBalance(); + expect(balance.eq(depositAmount.mul(2)), 'Incorrect balance after deposits').to.be.true; + + // Wait for at least one new committed block + let newBlocksCommitted = await mainContract.getTotalBlocksCommitted(); + let tryCount = 0; + while (blocksCommitted.eq(newBlocksCommitted) && tryCount < 10) { + newBlocksCommitted = await mainContract.getTotalBlocksCommitted(); + tryCount += 1; + await utils.sleep(1); + } + }); + + step('Send l1 tx for saving new bootloader', async () => { + const path = `${process.env.ZKSYNC_HOME}/etc/system-contracts/bootloader/build/artifacts/playground_block.yul/playground_block.yul.zbin`; + const bootloaderCode = ethers.utils.hexlify(fs.readFileSync(path)); + bootloaderHash = ethers.utils.hexlify(hashBytecode(bootloaderCode)); + const txHandle = await tester.syncWallet.requestExecute({ + contractAddress: ethers.constants.AddressZero, + calldata: '0x', + l2GasLimit: 20000000, + factoryDeps: [bootloaderCode], + 
overrides: { + gasLimit: 3000000 + } + }); + await txHandle.wait(); + + // Set the new bootloader hash and do not send the l1 batches with new bootloader + process.env.CHAIN_STATE_KEEPER_BOOTLOADER_HASH = bootloaderHash; + process.env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_COMMIT_DEADLINE = '1'; + + await utils.exec('pkill zksync_server'); + await utils.sleep(10); + utils.background( + `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper >> server_logs.txt` + ); + await utils.sleep(10); + // Wait for finalizing the last tx with old bootloader + await txHandle.waitFinalize(); + // Create one more tx with the new bootloader + await checkedRandomTransfer(alice, BigNumber.from(1)); + }); + + step('upgrade bootloader on contract', async () => { + const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); + const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + const deployWallet = Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect( + tester.ethProvider + ); + + const address = await tester.web3Provider.getMainContractAddress(); + const contract = IZkSyncFactory.connect(address, deployWallet); + let tx = await contract.setL2BootloaderBytecodeHash(bootloaderHash); + await tx.wait(10); + // Restart server. And start sending the blocks with the new bootloader + await utils.exec('pkill zksync_server'); + await utils.sleep(10); + utils.background( + `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper >> server_logs.txt` + ); + await utils.sleep(10); + }); + + step('execute transactions after simple restart', async () => { + // Execute an L2 transaction + const txHandle = await checkedRandomTransfer(alice, BigNumber.from(1)); + await txHandle.waitFinalize(); + + // Stop server. 
+ await utils.exec('pkill zksync_server'); + await utils.sleep(10); + + // Run again. + utils.background( + `cd $ZKSYNC_HOME && cargo run --bin zksync_server --release -- --components=api,tree,eth,data_fetcher,state_keeper >> server_logs.txt` + ); + await utils.sleep(10); + + // Trying to send a transaction from the same address again + await checkedRandomTransfer(alice, BigNumber.from(1)); + + let bootloaderHashL1 = await mainContract.getL2BootloaderBytecodeHash(); + expect(bootloaderHashL1).eq(bootloaderHash); + }); + + after('Try killing server', async () => { + try { + await utils.exec('pkill zksync_server'); + } catch (_) {} + }); +}); + +async function checkedRandomTransfer(sender: zkweb3.Wallet, amount: BigNumber): Promise { + const senderBalanceBefore = await sender.getBalance(); + const receiver = zkweb3.Wallet.createRandom().connect(sender.provider); + const transferHandle = await sender.sendTransaction({ + to: receiver.address, + value: amount + }); + const txReceipt = await transferHandle.wait(); + + const senderBalanceAfter = await sender.getBalance(); + const receiverBalanceAfter = await receiver.getBalance(); + + expect(receiverBalanceAfter.eq(amount), 'Failed to update the balance of the receiver').to.be.true; + + const spentAmount = txReceipt.gasUsed.mul(transferHandle.gasPrice!).add(amount); + expect(senderBalanceAfter.add(spentAmount).eq(senderBalanceBefore), 'Failed to update the balance of the sender').to + .be.true; + return transferHandle; +} diff --git a/core/tests/upgrade-test/tsconfig.json b/core/tests/upgrade-test/tsconfig.json new file mode 100644 index 000000000000..6c8907a86016 --- /dev/null +++ b/core/tests/upgrade-test/tsconfig.json @@ -0,0 +1,9 @@ +{ + "compilerOptions": { + "target": "es2019", + "module": "commonjs", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true + } +} diff --git a/docker/circuit-synthesizer/Dockerfile b/docker/circuit-synthesizer/Dockerfile index dd128e0ce794..21a51e6329d1 100644 --- 
a/docker/circuit-synthesizer/Dockerfile +++ b/docker/circuit-synthesizer/Dockerfile @@ -15,8 +15,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2022-08-23 && \ - rustup default nightly-2022-08-23 + rustup install nightly-2023-02-21 && \ + rustup default nightly-2023-02-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 2b1708e78125..8355ea217084 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -32,6 +32,34 @@ RUN mkdir -p /etc/zksolc-bin/v1.3.1 \ && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.1 \ && cp zksolc-linux-amd64-musl-v1.3.1 /etc/zksolc-bin/v1.3.1/zksolc \ && chmod +x /etc/zksolc-bin/v1.3.1/zksolc +RUN mkdir -p /etc/zksolc-bin/v1.3.2 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.2 \ + && cp zksolc-linux-amd64-musl-v1.3.2 /etc/zksolc-bin/v1.3.2/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.2/zksolc +RUN mkdir -p /etc/zksolc-bin/v1.3.3 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.3 \ + && cp zksolc-linux-amd64-musl-v1.3.3 /etc/zksolc-bin/v1.3.3/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.3/zksolc +RUN mkdir -p /etc/zksolc-bin/v1.3.4 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.4 \ + && cp zksolc-linux-amd64-musl-v1.3.4 /etc/zksolc-bin/v1.3.4/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.4/zksolc +RUN mkdir -p /etc/zksolc-bin/v1.3.5 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.5 \ + && cp zksolc-linux-amd64-musl-v1.3.5 /etc/zksolc-bin/v1.3.5/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.5/zksolc + + +RUN mkdir -p /etc/zksolc-bin/v1.3.6 \ + && wget 
https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.6 \ + && cp zksolc-linux-amd64-musl-v1.3.6 /etc/zksolc-bin/v1.3.6/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.6/zksolc + + +RUN mkdir -p /etc/zksolc-bin/v1.3.7 \ + && wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.7 \ + && cp zksolc-linux-amd64-musl-v1.3.7 /etc/zksolc-bin/v1.3.7/zksolc \ + && chmod +x /etc/zksolc-bin/v1.3.7/zksolc COPY docker/contract-verifier/install-all-solc.sh install-all-solc.sh RUN sh ./install-all-solc.sh diff --git a/docker/local-node/entrypoint.sh b/docker/local-node/entrypoint.sh index fd2ffa8aefa0..beab804514fe 100755 --- a/docker/local-node/entrypoint.sh +++ b/docker/local-node/entrypoint.sh @@ -41,6 +41,5 @@ then fi # start server -cd /infrastructure/zk && yarn start config compile && cd / source /etc/env/dev.env zksync_server diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index 6d96eaa2494b..8fe3c5252de3 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -15,8 +15,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2022-08-23 && \ - rustup default nightly-2022-08-23 + rustup install nightly-2023-02-21 && \ + rustup default nightly-2023-02-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 13f219fbdb31..b5c94b1ffc9a 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -2,7 +2,7 @@ # Not expected to work locally # syntax=docker/dockerfile:experimental -FROM rust:1.65-buster as builder +FROM rust:1.67-buster as builder RUN apt-get update && apt-get install -y clang && rm -rf /var/lib/apt/lists/* WORKDIR /usr/src/zksync COPY . . 
@@ -19,6 +19,8 @@ COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin COPY --from=builder /usr/src/zksync/target/release/rocksdb_util /usr/bin COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ +COPY etc/system-contracts/contracts/artifacts/ /etc/system-contracts/contracts/artifacts/ +COPY etc/system-contracts/contracts/precompiles/artifacts/ /etc/system-contracts/contracts/precompiles/artifacts/ COPY etc/system-contracts/artifacts-zk /etc/system-contracts/artifacts-zk COPY contracts/ethereum/artifacts/ /contracts/ethereum/artifacts/ COPY contracts/zksync/artifacts-zk/ /contracts/zksync/artifacts-zk/ diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index f4cee3d77cc8..687740af664e 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -61,7 +61,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN curl https://sh.rustup.rs -sSf | bash -s -- -y -RUN rustup install nightly-2022-08-23 +RUN rustup install nightly-2023-02-21 RUN rustup default stable RUN cargo install --version=0.5.6 sqlx-cli RUN cargo install cargo-tarpaulin @@ -72,7 +72,6 @@ RUN wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linu && mv solc-linux-amd64-v0.8.12+commit.f00d7308 /usr/bin/solc \ && chmod +x /usr/bin/solc # Obtain `zksolc` 1.1.5. 
- RUN wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.1.6 \ && mv zksolc-linux-amd64-musl-v1.1.6 /usr/bin/zksolc \ && chmod +x /usr/bin/zksolc diff --git a/docker/zk-rust-nightly-environment/Dockerfile b/docker/zk-rust-nightly-environment/Dockerfile index 902724a06466..7957d552f048 100644 --- a/docker/zk-rust-nightly-environment/Dockerfile +++ b/docker/zk-rust-nightly-environment/Dockerfile @@ -10,8 +10,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ # Setup rust nightly RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2022-08-23 && \ - rustup default nightly-2022-08-23 + rustup install nightly-2023-02-21 && \ + rustup default nightly-2023-02-21 # Setup cmake diff --git a/docs/setup-dev.md b/docs/setup-dev.md index cf7c1e3cd375..d299eee96ebb 100644 --- a/docs/setup-dev.md +++ b/docs/setup-dev.md @@ -13,7 +13,7 @@ If you are a NixOS user or would like to have a reproducible environment, skip t ## `git` -If you are using an ssh key to authenticate with GitHub you need to make git always use ssh instead of http. +If you are using an ssh key to authenticate with GitHub you need to make git always use ssh instead of http. ```bash git config --global url."ssh://git@github.com/".insteadOf https://github.com/ @@ -63,13 +63,14 @@ If logging out does not help, restarting the computer should. ## `Node` & `Yarn` -1. Install `Node` (requires version 14.14.0). Since our team attempts to always use the latest LTS version of`Node.js`, +1. Install `Node` (requires version 16.19.1). Since our team attempts to always use the latest LTS version of`Node.js`, we suggest you to install [nvm](https://github.com/nvm-sh/nvm). It will allow you to update `Node.js`version easily - in the future. -2. Install `yarn`. Instructions can be found on the [official site](https://classic.yarnpkg.com/en/docs/install/). Check - if `yarn` is installed by running `yarn -v`. 
If you face any problems when installing `yarn`, it might be the case - that your package manager installed the wrong package.Make sure to thoroughly follow the instructions above on the - official website. It contains a lot of troubleshooting guides in it. + in the future (by running `nvm use 16.19.1`) +2. Install `yarn` (make sure to get version 1.22.19 - you can change the version by running `yarn set version 1.22.19`). + Instructions can be found on the [official site](https://classic.yarnpkg.com/en/docs/install/). + Check if `yarn` is installed by running `yarn -v`. If you face any problems when installing `yarn`, it might be the + case that your package manager installed the wrong package.Make sure to thoroughly follow the instructions above on + the official website. It contains a lot of troubleshooting guides in it. ## `Axel` @@ -172,7 +173,7 @@ Most environments will have this preinstalled but if not, install Python. ## Easier method using `nix` -Nix is a tool that can fetch _exactly_ the right dependencies specified via hashes. The current config is Linux-only, but +Nix is a tool that can fetch _exactly_ the right dependencies specified via hashes. The current config is Linux-only but it is likely that it can be adapted to Mac. Install `nix`. Enable the nix command and flakes. 
diff --git a/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol b/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol index 01b7b5198add..b31764a8de27 100644 --- a/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol +++ b/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: MIT OR Apache-2.0 -pragma solidity ^0.8; +pragma solidity ^0.8.0; import {MSG_VALUE_SIMULATOR_IS_SYSTEM_BIT, MSG_VALUE_SYSTEM_CONTRACT} from "./Constants.sol"; import "./Utils.sol"; diff --git a/etc/contracts-test-data/contracts/custom-account/custom-account.sol b/etc/contracts-test-data/contracts/custom-account/custom-account.sol index 0cb105c5c666..7601f5cd7b87 100644 --- a/etc/contracts-test-data/contracts/custom-account/custom-account.sol +++ b/etc/contracts-test-data/contracts/custom-account/custom-account.sol @@ -33,7 +33,7 @@ contract CustomAccount is IAccount { if (violateValidationRules) { // Such a tx should not pass the validation step, because it depends on the balance of another account require(BOOTLOADER_FORMAL_ADDRESS.balance == 0, "Bootloader balance must be zero"); - } + } } function _validateTransaction(bytes32 _suggestedSignedTxHash, Transaction calldata _transaction) internal { diff --git a/etc/contracts-test-data/contracts/estimator/estimator.sol b/etc/contracts-test-data/contracts/estimator/estimator.sol index 7fc7dfffc64b..5bbe0c82aaa0 100644 --- a/etc/contracts-test-data/contracts/estimator/estimator.sol +++ b/etc/contracts-test-data/contracts/estimator/estimator.sol @@ -14,7 +14,6 @@ interface IL2Messenger { uint160 constant SYSTEM_CONTRACTS_OFFSET = 0x8000; // 2^15 IL2Messenger constant L2_MESSENGER = IL2Messenger(address(SYSTEM_CONTRACTS_OFFSET + 0x08)); -// TODO: Should be set to the actual value (SMA-1185). // Represents the maximum amount of L2->L1 messages that can happen in one block. 
uint256 constant MAX_L2_L1_MESSAGES_IN_BLOCK = 256; diff --git a/etc/env/base/api.toml b/etc/env/base/api.toml index 4cf93b5c81fd..3d1ac7b4b727 100644 --- a/etc/env/base/api.toml +++ b/etc/env/base/api.toml @@ -42,6 +42,7 @@ account_pks=[ ] estimate_gas_scale_factor=1.2 estimate_gas_acceptable_overestimation=1000 +max_tx_size=1000000 # Configuration for the explorer API [api.explorer] # Port for the explorer API. diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 0b22f30111b3..4eb387b47788 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -47,6 +47,14 @@ reject_tx_at_gas_percentage=0.95 # Whether all transactions should be reexecuted. This is needed to test the rollback functionality. reexecute_each_tx=true +bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b594c" +default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" + +# The price the operator spends on 1 gas of computation in wei. +fair_l2_gas_price=250000000 + +# Max number of computational gas that validation step is allowed to take. 
+validation_computational_gas_limit=300000 [chain.operations_manager] # Sleep time when there is no new input data diff --git a/etc/env/base/circuit_synthesizer.toml b/etc/env/base/circuit_synthesizer.toml index 766c1ab7a1de..970520025b6b 100644 --- a/etc/env/base/circuit_synthesizer.toml +++ b/etc/env/base/circuit_synthesizer.toml @@ -7,3 +7,4 @@ prover_instance_poll_time_in_milli_secs=250 prometheus_listener_port=3314 prometheus_pushgateway_url="http://127.0.0.1:9091" prometheus_push_interval_ms=100 +prover_group_id=100 diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 7a0226a412b5..63c98bd97c2d 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -17,12 +17,15 @@ L2_ERC20_BRIDGE_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L2_TESTNET_PAYMASTER_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_ALLOW_LIST_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" CREATE2_FACTORY_ADDR="0xce0042B868300000d44A59004Da54A005ffdcf9f" -VK_COMMITMENT_BASIC_CIRCUITS="0x0af0d77503b93a15fedd086638b7326cd3d169a2f388e568f41ea906c7a6eb93" +VALIDATOR_TIMELOCK_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +VALIDATOR_TIMELOCK_EXECUTION_DELAY=0 +VK_COMMITMENT_BASIC_CIRCUITS="0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0" VK_COMMITMENT_LEAF="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" VK_COMMITMENT_NODE="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a21b8f7ef78bd09a8db8" GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" GENESIS_ROOT="0x2d5ab622df708ab44944bb02377be85b6f27812e9ae520734873b7a193898ba4" PRIORITY_TX_MAX_GAS_LIMIT=72000000 +DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT=10000000 GENESIS_BLOCK_COMMITMENT="0x6c7f89335e3ade24a7768ed73c425afd9fac92a094e0681f76cb6feabf8b6223" # Current rollup leaf index after genesis GENESIS_ROLLUP_LEAF_INDEX="21" diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index ee1bda17018a..7dd973c23f0e 
100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -12,6 +12,8 @@ wait_confirmations=1 expected_wait_time_block=30 # Node polling period in seconds. tx_poll_period=1 +# Aggregate txs polling period in seconds. +aggregate_tx_poll_period=1 # The maximum amount of simultaneously sent Ethereum transactions. max_txs_in_flight=30 # Safe in the local environment, do not repeat on prod (right now it will produce way too many extra calls to web3) proof_sending_mode="SkipEveryProof" diff --git a/etc/env/base/object_store.toml b/etc/env/base/object_store.toml index 961eb3a80909..b06ec092fce5 100644 --- a/etc/env/base/object_store.toml +++ b/etc/env/base/object_store.toml @@ -1,5 +1,4 @@ [object_store] -service_account_path="~/gcloud/service_account.json" bucket_base_url="base_url" mode="FileBacked" file_backed_base_path="artifacts" diff --git a/etc/env/base/prover_group.toml b/etc/env/base/prover_group.toml index b0824219ca52..6bc0ca001354 100644 --- a/etc/env/base/prover_group.toml +++ b/etc/env/base/prover_group.toml @@ -9,3 +9,5 @@ group_6_circuit_ids="12,13" group_7_circuit_ids="14,15" group_8_circuit_ids="16,17" group_9_circuit_ids="3" +group_100_circuit_ids="" +region_read_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location" diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 31404c0f550b..b23144a1e335 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -26,6 +26,7 @@ zksync_verification_key_generator_and_server=info,\ zksync_object_store=info,\ setup_key_generator_and_server=info,\ zksync_circuit_synthesizer=info,\ +en_playground=info,\ """ # `RUST_BACKTRACE` variable diff --git a/etc/env/base/witness_generator.toml b/etc/env/base/witness_generator.toml index 1486a9e76cce..9b77ccf8fcdc 100644 --- a/etc/env/base/witness_generator.toml +++ b/etc/env/base/witness_generator.toml @@ -3,9 +3,4 @@ generation_timeout_in_secs=900 
initial_setup_key_path="./keys/setup/setup_2^22.key" key_download_url="https://storage.googleapis.com/universal-setup/setup_2^22.key" max_attempts=1 -# Witness/proof sampling params. -# Sampling will be enabled only if `sampling_enabled=true` and `sampling_safe_prover_lag`, -# `sampling_max_prover_lag` are provided, otherwise it will generate witnesses/proofs for every block. -# When setting `sampling_safe_prover_lag=sampling_max_prover_lag=0` every block proof is skipped. -sampling_enabled=false dump_arguments_for_blocks="2,3" diff --git a/etc/env/docker.env b/etc/env/docker.env deleted file mode 100644 index bf32ba775417..000000000000 --- a/etc/env/docker.env +++ /dev/null @@ -1,14 +0,0 @@ -ETH_CLIENT_WEB3_URL=http://geth:8545 -FEE_TICKER_COINMARKETCAP_BASE_URL=http://dev-ticker:9876 -FEE_TICKER_COINGECKO_BASE_URL=http://dev-ticker:9876 -DATABASE_URL=postgres://postgres@postgres/zksync_local -TEST_DATABASE_URL=postgres://postgres@postgres/zksync_local_test -FEE_TICKER_UNISWAP_URL=http://dev-liquidity-token-watcher:9975/graphql -DEV_LIQUIDITY_TOKEN_WATCHER_BLACKLISTED_TOKENS=0x0000000000000000000000000000000000000001 -DEV_LIQUIDITY_TOKEN_WATCHER_DEFAULT_VOLUME=500 -DEV_LIQUIDITY_TOKEN_WATCHER_REGIME=whitelist - -# Time to process one miniblock (in ms) -CHAIN_STATE_KEEPER_MINIBLOCK_ITERATION_INTERVAL=50 -# For loadtest performing -L1_RPC_ADDRESS=http://geth:8545 diff --git a/etc/lint-config/md.js b/etc/lint-config/md.js index 485fd1dc5c57..13bde529d13f 100644 --- a/etc/lint-config/md.js +++ b/etc/lint-config/md.js @@ -4,5 +4,6 @@ module.exports = { "no-duplicate-header": false, "no-inline-html": false, "line-length": false, - "fenced-code-language": false + "fenced-code-language": false, + "no-multiple-blanks": false }; diff --git a/etc/lint-config/sol.js b/etc/lint-config/sol.js index 2d29c78f9477..d9895757fe83 100644 --- a/etc/lint-config/sol.js +++ b/etc/lint-config/sol.js @@ -5,8 +5,6 @@ module.exports = { // And also there were >290 warnings on *.sol files. 
Since changes to *.sol // files require an audit, it was decided to postpone the changes to make the solhint // pass. - // - // TODO (ZKS-329): Turn on the majority of the rules and make the solhint comply with them. "state-visibility": "off", "var-name-mixedcase": "off", "avoid-call-value": "off", diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000000..3995d1abc87a --- /dev/null +++ b/flake.lock @@ -0,0 +1,27 @@ +{ + "nodes": { + "root": { + "inputs": { + "stable": "stable" + } + }, + "stable": { + "locked": { + "lastModified": 1659446231, + "narHash": "sha256-hekabNdTdgR/iLsgce5TGWmfIDZ86qjPhxDg/8TlzhE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "eabc38219184cc3e04a974fe31857d8e0eac098d", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-21.11", + "repo": "nixpkgs", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000000..f1e7ae5b9dc3 --- /dev/null +++ b/flake.nix @@ -0,0 +1,40 @@ +{ + description = "zkSync development shell"; + inputs = { + stable.url = "github:NixOS/nixpkgs/nixos-21.11"; + }; + outputs = {self, stable}: { + packages.x86_64-linux.default = + with import stable { system = "x86_64-linux"; }; + pkgs.mkShell { + name = "zkSync"; + src = ./.; + buildInputs = [ + docker-compose + nodejs + yarn + axel + libclang + openssl + pkg-config + postgresql + python3 + solc + ]; + + # for RocksDB and other Rust bindgen libraries + LIBCLANG_PATH = lib.makeLibraryPath [ libclang.lib ]; + BINDGEN_EXTRA_CLANG_ARGS = ''-I"${libclang.lib}/lib/clang/${libclang.version}/include"''; + + shellHook = '' + export ZKSYNC_HOME=$PWD + export PATH=$ZKSYNC_HOME/bin:$PATH + ''; + + # hardhat solc requires ld-linux + # Nixos has to fake it with nix-ld + NIX_LD_LIBRARY_PATH = lib.makeLibraryPath []; + NIX_LD = builtins.readFile "${stdenv.cc}/nix-support/dynamic-linker"; + }; + }; +} diff --git 
a/infrastructure/local-setup-preparation/src/index.ts b/infrastructure/local-setup-preparation/src/index.ts index de25e226f59e..585df599f82a 100644 --- a/infrastructure/local-setup-preparation/src/index.ts +++ b/infrastructure/local-setup-preparation/src/index.ts @@ -15,16 +15,30 @@ async function depositWithRichAccounts() { throw new Error('zkSync L1 Main contract address was not found'); } + // During the preparation for the local node, the L2 server is not available, so + // it is not possible to estimate the exact number of gas that is required for the transaction + const DEPOSIT_L2_GAS_LIMIT = 10_000_000; + const gasPrice = await ethProvider.getGasPrice(); + const contract = new ethers.Contract(process.env.CONTRACTS_DIAMOND_PROXY_ADDR, utils.ZKSYNC_MAIN_ABI, ethProvider); + + const expectedCost = await contract.l2TransactionBaseCost( + gasPrice, + DEPOSIT_L2_GAS_LIMIT, + utils.DEFAULT_GAS_PER_PUBDATA_LIMIT + ); + for (const wallet of wallets) { const contract = new ethers.Contract(process.env.CONTRACTS_DIAMOND_PROXY_ADDR, utils.ZKSYNC_MAIN_ABI, wallet); const overrides = { - value: AMOUNT_TO_DEPOSIT + value: AMOUNT_TO_DEPOSIT.add(expectedCost) }; const balance = await wallet.getBalance(); console.log(`Wallet balance is ${ethers.utils.formatEther(balance)} ETH`); + // TODO: Currently we're providing zero as an operator fee, which works right now, + // but will be changed in the future. 
handles.push( // We have to implement the deposit manually because we run this script before running the server, // deposit method from wallet requires a running server @@ -32,8 +46,8 @@ async function depositWithRichAccounts() { wallet.address, AMOUNT_TO_DEPOSIT, '0x', - utils.RECOMMENDED_DEPOSIT_L2_GAS_LIMIT, - utils.DEFAULT_GAS_PER_PUBDATA_LIMIT, + DEPOSIT_L2_GAS_LIMIT, + utils.REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, [], wallet.address, overrides diff --git a/infrastructure/zk/src/clean.ts b/infrastructure/zk/src/clean.ts index 9f306eae1ab0..f5dc76446eb6 100644 --- a/infrastructure/zk/src/clean.ts +++ b/infrastructure/zk/src/clean.ts @@ -2,10 +2,14 @@ import { Command } from 'commander'; import * as fs from 'fs'; import { confirmAction } from './utils'; -export function clean(directory: string) { - if (fs.existsSync(directory)) { - fs.rmdirSync(directory, { recursive: true }); - console.log(`Successfully removed ${directory}`); +export function clean(path: string) { + if (fs.existsSync(path)) { + if (fs.lstatSync(path).isDirectory()) { + fs.rmdirSync(path, { recursive: true }); + } else { + fs.rmSync(path); + } + console.log(`Successfully removed ${path}`); } } @@ -18,17 +22,16 @@ export const command = new Command('clean') .option('--all') .description('removes generated files') .action(async (cmd) => { - if (!cmd.contracts && !cmd.config && !cmd.database && !cmd.backups) { + if (!cmd.contracts && !cmd.config && !cmd.database && !cmd.backups && !cmd.artifacts) { cmd.all = true; // default is all } await confirmAction(); if (cmd.all || cmd.config) { - const env = cmd.environment || process.env.ZKSYNC_ENV || 'dev'; - clean(`etc/env/${env}`); - - fs.rmSync(`etc/env/${env}.env`); - console.log(`Successfully removed etc/env/${env}.env`); + const env = process.env.ZKSYNC_ENV; + clean(`etc/env/${env}.env`); + clean('etc/env/.current'); + clean('etc/env/.init.env'); } if (cmd.all || cmd.artifacts) { diff --git a/infrastructure/zk/src/config.ts 
b/infrastructure/zk/src/config.ts index 71c4483f97c9..eb9bd72dc6fe 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -1,9 +1,7 @@ import { Command } from 'commander'; import * as toml from '@iarna/toml'; import * as fs from 'fs'; -import * as path from 'path'; import deepExtend from 'deep-extend'; -import { env } from 'process'; const CONFIG_FILES = [ 'api.toml', @@ -26,88 +24,31 @@ const CONFIG_FILES = [ 'prover_group.toml' ]; -async function getEnvironment(): Promise { - const environmentFilePath = path.join(envDirPath(), 'current'); - // Try to read environment from file. - if (fs.existsSync(environmentFilePath)) { - const environment = (await fs.promises.readFile(environmentFilePath)).toString().trim(); - if (environment !== '') { - return environment; - } - } - - // Fallback scenario: file doesn't exist or is empty. - return 'dev'; -} - -function envDirPath(): string { - return path.join(env['ZKSYNC_HOME'] as string, 'etc', 'env'); -} - -function getConfigPath(environment: string, configName: string): string { - return path.join(envDirPath(), environment, configName); -} - -async function loadConfig(environment: string, configName: string) { - const configPath = getConfigPath(environment, configName); - const fileContents = await fs.promises.readFile(configPath); +function loadConfigFile(path: string) { + const fileContents = fs.readFileSync(path); try { return toml.parse(fileContents.toString()); } catch (e: any) { - console.error( - `<${environment}/${configName}> load failed: Parsing error on line ${e.line} column ${e.column}: ${e.message}` - ); + console.error(`${path} load failed: Parsing error on line ${e.line} column ${e.column}: ${e.message}`); process.exit(1); } } -async function checkConfigExistence(environment: string) { - const configFolder = path.join(envDirPath(), environment); - - // Check if the folder exists and it's not empty. 
- if (fs.existsSync(configFolder) && fs.readdirSync(configFolder).length != 0) { - return; - } - - // Folder doesn't exist or it's empty. - if (environment == 'dev') { - // Copy configs from the `base` folder. - // Folder may be created, just be empty, so create it only if needed. - if (!fs.existsSync(configFolder)) { - await fs.promises.mkdir(configFolder); - } - - for (const configFile of CONFIG_FILES) { - const from = getConfigPath('base', configFile); - const to = getConfigPath('dev', configFile); - await fs.promises.copyFile(from, to); - } - return; - } - - // Folder doesn't exist and the environment is not `dev`. - console.error(`Configuration files were not found for environment <${environment}>`); - process.exit(1); -} - -function collectVariables(prefix: string, config: any): Map { +function collectVariables(config: any, prefix: string = ''): Map { let variables: Map = new Map(); for (const key in config) { const keyUppercase = key.toLocaleUpperCase(); if (typeof config[key] == 'object' && config[key] !== null && !Array.isArray(config[key])) { // It's a map object: parse it recursively. - // Add a prefix for the child elements: // '' -> 'KEY_'; 'KEY_' -> 'KEY_ANOTHER_KEY_'. const newPrefix = `${prefix}${keyUppercase}_`; - - const nestedEntries = collectVariables(newPrefix, config[key]); + const nestedEntries = collectVariables(config[key], newPrefix); variables = new Map([...variables, ...nestedEntries]); } else { const variableName = `${prefix}${keyUppercase}`; const value = Array.isArray(config[key]) ? config[key].join(',') : config[key]; - variables.set(variableName, value); } } @@ -115,49 +56,41 @@ function collectVariables(prefix: string, config: any): Map { return variables; } -async function loadAllConfigs(environment?: string) { - if (!environment) { - environment = await getEnvironment(); - } - - // Check that config folder exists (or initialize it). 
- await checkConfigExistence(environment); - - // Accumulator to which we will load all the configs. +function loadConfig(env?: string) { + env ??= process.env.ZKSYNC_ENV!; let config = {}; for (const configFile of CONFIG_FILES) { - const localConfig = await loadConfig(environment, configFile); - - // Extend the `config` with the new values. + const localConfig = loadConfigFile(`etc/env/base/${configFile}`); deepExtend(config, localConfig); } + const overridesPath = `${process.env.ZKSYNC_HOME}/etc/env/${env}.toml`; + if (fs.existsSync(overridesPath)) { + const overrides = loadConfigFile(overridesPath); + deepExtend(config, overrides); + } + return config; } -export async function printAllConfigs(environment?: string) { - const config = await loadAllConfigs(environment); +export function printAllConfigs(environment?: string) { + const config = loadConfig(environment); console.log(`${JSON.stringify(config, null, 2)}`); } -export async function compileConfig(environment?: string) { - if (!environment) { - environment = await getEnvironment(); - } - - const config = await loadAllConfigs(environment); - - const variables = collectVariables('', config); +export function compileConfig(environment?: string) { + environment ??= process.env.ZKSYNC_ENV!; + const config = loadConfig(environment); + const variables = collectVariables(config); - let outputFileContents = `# This file is generated automatically by 'zk config compile'\n`; - outputFileContents += `# Do not edit manually!\n\n`; + let outputFileContents = ''; variables.forEach((value: string, key: string) => { outputFileContents += `${key}=${value}\n`; }); - const outputFileName = path.join(envDirPath(), `${environment}.env`); - await fs.promises.writeFile(outputFileName, outputFileContents); + const outputFileName = `etc/env/${environment}.env`; + fs.writeFileSync(outputFileName, outputFileContents); console.log('Configs compiled'); } diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts 
index 87373a328147..1e1f23becd1f 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -25,8 +25,6 @@ function updateContractsEnv(deployLog: String, envVars: Array) { if (matches !== null) { const varContents = matches[0]; env.modify(envVar, varContents); - env.modify_contracts_toml(envVar, varContents); - updatedContracts += `${varContents}\n`; } } @@ -34,6 +32,15 @@ function updateContractsEnv(deployLog: String, envVars: Array) { return updatedContracts; } +export async function initializeValidator(args: any[] = []) { + await utils.confirmAction(); + + const isLocalSetup = process.env.ZKSYNC_LOCAL_SETUP; + const baseCommandL1 = isLocalSetup ? `yarn --cwd /contracts/ethereum` : `yarn l1-contracts`; + + await utils.spawn(`${baseCommandL1} initialize-validator ${args.join(' ')} | tee initilizeValidator.log`); +} + export async function initializeL1AllowList(args: any[] = []) { await utils.confirmAction(); @@ -63,11 +70,15 @@ export async function deployL2(args: any[] = []) { await utils.spawn(`${baseCommandL2} deploy-testnet-paymaster ${args.join(' ')} | tee -a deployL2.log`); + await utils.spawn(`${baseCommandL2} deploy-l2-weth ${args.join(' ')} | tee -a deployL2.log`); + const deployLog = fs.readFileSync('deployL2.log').toString(); const envVars = [ 'CONTRACTS_L2_ETH_BRIDGE_ADDR', 'CONTRACTS_L2_ERC20_BRIDGE_ADDR', - 'CONTRACTS_L2_TESTNET_PAYMASTER_ADDR' + 'CONTRACTS_L2_TESTNET_PAYMASTER_ADDR', + 'CONTRACTS_L2_WETH_IMPLEMENTATION_ADDR', + 'CONTRACTS_L2_WETH_PROXY_ADDR' ]; updateContractsEnv(deployLog, envVars); @@ -92,6 +103,7 @@ export async function deployL1(args: any[]) { 'CONTRACTS_VERIFIER_ADDR', 'CONTRACTS_DIAMOND_INIT_ADDR', 'CONTRACTS_DIAMOND_PROXY_ADDR', + 'CONTRACTS_VALIDATOR_TIMELOCK_ADDR', 'CONTRACTS_GENESIS_TX_HASH', 'CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR', 'CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR', @@ -118,6 +130,7 @@ command .action(redeployL1); command.command('deploy 
[deploy-opts...]').allowUnknownOption(true).description('deploy contracts').action(deployL1); command.command('build').description('build contracts').action(build); +command.command('initilize-validator').description('initialize validator').action(initializeValidator); command .command('initilize-l1-allow-list-contract') .description('initialize L1 allow list contract') diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 131b9ec01240..3b916ec16b72 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -83,6 +83,10 @@ async function _push(image: string, tagList: string[]) { for (const tag of tagList) { await utils.spawn(`docker push matterlabs/${image}:${tag}`); + await utils.spawn( + `docker tag matterlabs/${image}:${tag} us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}` + ); + await utils.spawn(`docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}`); } } diff --git a/infrastructure/zk/src/dummy-prover.ts b/infrastructure/zk/src/dummy-prover.ts deleted file mode 100644 index f9e136dfc7be..000000000000 --- a/infrastructure/zk/src/dummy-prover.ts +++ /dev/null @@ -1,69 +0,0 @@ -import { Command } from 'commander'; - -import * as server from './server'; -import * as contract from './contract'; -import * as env from './env'; - -async function performRedeployment() { - await contract.build(); - - try { - await server.genesis_from_sources(); - } catch { - console.log('Failed to genesis the state'); - } - - await contract.redeployL1([]); -} - -export async function status() { - if (process.env.CONTRACTS_DUMMY_VERIFIER == 'true') { - console.log('Dummy Prover status: enabled'); - return true; - } - console.log('Dummy Prover status: disabled'); - return false; -} - -async function setStatus(value: boolean, redeploy: boolean) { - env.modify('CONTRACTS_DUMMY_VERIFIER', `CONTRACTS_DUMMY_VERIFIER="${value}"`); - env.modify_contracts_toml('CONTRACTS_DUMMY_VERIFIER', 
`CONTRACTS_DUMMY_VERIFIER="${value}"`); - await status(); - if (redeploy) { - console.log('Redeploying the contract...'); - await performRedeployment(); - console.log('Done.'); - } -} - -export async function enable(redeploy: boolean = true) { - await setStatus(true, redeploy); -} - -export async function disable(redeploy: boolean = true) { - await setStatus(false, redeploy); -} - -export const command = new Command('dummy-prover').description('commands for zksync dummy prover'); - -command - .command('enable') - .description('enable the dummy prover') - .option('--no-redeploy', 'do not redeploy the contracts') - .action(async (cmd: Command) => { - await enable(cmd.redeploy); - }); - -command - .command('disable') - .description('disable the dummy prover') - .option('--no-redeploy', 'do not redeploy the contracts') - .action(async (cmd: Command) => { - await disable(cmd.redeploy); - }); - -command - .command('status') - .description('check if dummy prover is enabled') - // @ts-ignore - .action(status); diff --git a/infrastructure/zk/src/env.ts b/infrastructure/zk/src/env.ts index 735ac33606f2..50df1a1d449d 100644 --- a/infrastructure/zk/src/env.ts +++ b/infrastructure/zk/src/env.ts @@ -3,21 +3,35 @@ import fs from 'fs'; import dotenv from 'dotenv'; import * as utils from './utils'; import * as config from './config'; -import * as toml from '@iarna/toml'; -export function get() { - fs.readdirSync('etc/env').forEach((file) => { - if (!file.endsWith('.env')) { - return; - } +export function get(print: boolean = false) { + const current = `etc/env/.current`; + const inCurrent = fs.existsSync(current) && fs.readFileSync(current).toString().trim(); + const currentEnv = (process.env.ZKSYNC_ENV = + process.env.ZKSYNC_ENV || inCurrent || (process.env.IN_DOCKER ? 
'docker' : 'dev')); - const env = file.replace(/\..*$/, ''); - if (env == process.env.ZKSYNC_ENV) { - console.log(' * ' + env); - } else { - console.log(' ' + env); + if (print) { + const envs = new Set(['dev', currentEnv]); + if (inCurrent) { + envs.add(inCurrent); } - }); + + fs.readdirSync(`etc/env`).forEach((file) => { + if (!file.startsWith('.') && (file.endsWith('.env') || file.endsWith('.toml'))) { + envs.add(file.replace(/\..*$/, '')); + } + }); + + envs.forEach((env) => { + if (env === currentEnv) { + console.log(`* ${env}`); + } else { + console.log(` ${env}`); + } + }); + } + + return currentEnv; } export async function gitHooks() { @@ -29,124 +43,83 @@ export async function gitHooks() { } } -export function set(env: string) { - const envFile = `etc/env/${env}.env`; - const envDir = `etc/env/${env}`; - if (!fs.existsSync(envFile)) { - throw new Error(envFile + ' not found'); +export function set(env: string, print: boolean = false) { + if (!fs.existsSync(`etc/env/${env}.env`) && !fs.existsSync(`etc/env/${env}.toml`)) { + console.error( + `Unknown environment: ${env}.\nCreate an environment file etc/env/${env}.env or etc/env/${env}.toml` + ); + process.exit(1); } - if (!fs.existsSync(envDir)) { - throw new Error(envFile + ' not found'); + fs.writeFileSync('etc/env/.current', env); + process.env.ZKSYNC_ENV = env; + const envFile = (process.env.ENV_FILE = `etc/env/${env}.env`); + if (!fs.existsSync(envFile)) { + // No .env file found - we should compile it! 
+ config.compileConfig(); } + reload(); + get(print); +} - fs.writeFileSync('etc/env/current', env); - process.env.ENV_FILE = envFile; - process.env.ENV_DIR = envDir; - process.env.ZKSYNC_ENV = env; - get(); +// override env with variables from init log +function loadInit() { + if (fs.existsSync('etc/env/.init.env')) { + const initEnv = dotenv.parse(fs.readFileSync('etc/env/.init.env')); + for (const envVar in initEnv) { + process.env[envVar] = initEnv[envVar]; + } + } } // we have to manually override the environment // because dotenv won't override variables that are already set export function reload() { - const envFile = process.env.ENV_FILE as string; - const env = dotenv.parse(fs.readFileSync(envFile)); - for (const envVar in env) { - process.env[envVar] = env[envVar]; - } - load_docker(); -} - -export function load_docker() { - const in_docker: number = parseInt(process.env.IN_DOCKER || '0'); - if (!in_docker) { - return; - } - const envFile = process.env.DOCKER_ENV_FILE as string; - const env = dotenv.parse(fs.readFileSync(envFile)); + const env = dotenv.parse(fs.readFileSync(process.env.ENV_FILE!)); for (const envVar in env) { process.env[envVar] = env[envVar]; } + loadInit(); } // loads environment variables -export async function load() { - const current = 'etc/env/current'; - const zksyncEnv = - process.env.ZKSYNC_ENV || (fs.existsSync(current) ? 
fs.readFileSync(current).toString().trim() : 'dev'); - const envFile = `etc/env/${zksyncEnv}.env`; - const envDir = `etc/env/${zksyncEnv}`; - const dockerEnvFile = `etc/env/docker.env`; - if (zksyncEnv == 'dev') { - // If there no folder with toml files (or it's empty) we should delete - // the old dev.env and regenerate toml files - if (!fs.existsSync('etc/env/dev') || fs.readdirSync('etc/env/dev').length == 0) { - if (fs.existsSync('etc/env/dev.env')) { - fs.rmSync('etc/env/dev.env'); - } - } - - if (!fs.existsSync('etc/env/dev.env')) { - await config.compileConfig(); - } - } +export function load() { + const zksyncEnv = get(); + const envFile = (process.env.ENV_FILE = `etc/env/${zksyncEnv}.env`); if (!fs.existsSync(envFile)) { - throw new Error('ZkSync config file not found: ' + envFile); + // No .env file found - we should compile it! + config.compileConfig(); } - if (fs.existsSync(dockerEnvFile)) { - process.env.DOCKER_ENV_FILE = dockerEnvFile; - } - process.env.ZKSYNC_ENV = zksyncEnv; - process.env.ENV_FILE = envFile; - process.env.ENV_DIR = envDir; dotenv.config({ path: envFile }); - load_docker(); + loadInit(); // This suppresses the warning that looks like: "Warning: Accessing non-existent property 'INVALID_ALT_NUMBER'...". // This warning is spawned from the `antlr4`, which is a dep of old `solidity-parser` library. - // Old version of `solidity-parser` is still videly used, and currently we can't get rid of it fully. + // Old version of `solidity-parser` is still widely used, and currently we can't get rid of it fully. process.env.NODE_OPTIONS = '--no-warnings'; } -// replaces an env variable in current .env file -// takes variable name, e.g. VARIABLE -// and the new assignment, e.g. VARIABLE=foo +// places the environment logged by `zk init` variables into the .init.env file export function modify(variable: string, assignedVariable: string) { - if (!process.env.ENV_FILE) { - // ENV_FILE variable is not set, do nothing. 
+ const initEnv = 'etc/env/.init.env'; + if (!fs.existsSync(initEnv)) { + fs.writeFileSync(initEnv, assignedVariable); return; } - const envFile = process.env.ENV_FILE as string; - if (!fs.existsSync(envFile)) { - console.log(`${process.env.ENV_FILE} env file was not found, skipping update...`); - return; + let source = fs.readFileSync(initEnv).toString(); + if (source.includes(variable)) { + utils.replaceInFile(initEnv, `${variable}=.*`, assignedVariable.trim()); + } else { + source += `\n${assignedVariable}`; + fs.writeFileSync(initEnv, source); } - utils.replaceInFile(envFile, `${variable}=.*`, assignedVariable.trim()); reload(); } -export function modify_contracts_toml(variable: string, assignedVariable: string) { - const toml_file = `${process.env.ENV_DIR}/contracts.toml`; - - if (!fs.existsSync(toml_file)) { - console.log(`contracts.toml config file was not found, skipping update...`); - return; - } - - const source = fs.readFileSync(toml_file).toString(); - const toml_res = toml.parse(source); - const trimmed_variable = variable.replace('CONTRACTS_', ''); - const trimmed_value = assignedVariable.split('='); - // @ts-ignore - toml_res['contracts'][trimmed_variable] = trimmed_value[1]; - fs.writeFileSync(toml_file, toml.stringify(toml_res)); -} - export const command = new Command('env') .arguments('[env_name]') .description('get or set zksync environment') .action((envName?: string) => { - envName ? set(envName) : get(); + envName ? 
set(envName, true) : get(true); }); diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index 08bdafa7e5fc..2b03e361977d 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -7,9 +7,7 @@ import { command as contractVerifier } from './contract_verifier'; import { command as up } from './up'; import { command as down } from './down'; import { command as contract } from './contract'; -import { command as dummyProver } from './dummy-prover'; -import { initCommand as init, reinitCommand as reinit, lightweightInitCommand as lightweight_init } from './init'; -import { command as prover } from './prover'; +import { initCommand as init, reinitCommand as reinit, lightweightInitCommand as lightweightInit } from './init'; import { command as run } from './run/run'; import { command as test } from './test/test'; import { command as docker } from './docker'; @@ -20,7 +18,6 @@ import { command as completion } from './completion'; import { command as config } from './config'; import { command as clean } from './clean'; import { command as db } from './database/database'; -// import { command as uni } from './uni'; import * as env from './env'; const COMMANDS = [ @@ -30,11 +27,9 @@ const COMMANDS = [ down, db, contract, - dummyProver, init, reinit, - lightweight_init, - prover, + lightweightInit, run, test, fmt, @@ -43,7 +38,6 @@ const COMMANDS = [ config, clean, compiler, - // uni, env.command, completion(program as Command) ]; @@ -58,7 +52,7 @@ async function main() { process.chdir(ZKSYNC_HOME); } - await env.load(); + env.load(); program.version('0.1.0').name('zk').description('zksync workflow tools'); diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index 242ffca9fa46..8e4c88161ad1 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -1,5 +1,6 @@ import { Command } from 'commander'; import chalk from 'chalk'; +import fs from 'fs'; import * as utils from './utils'; 
import * as server from './server'; @@ -37,8 +38,9 @@ export async function init(skipSubmodulesCheckout: boolean) { await announced('Checking PLONK setup', run.plonkSetup()); await announced('Building contracts', contract.build()); await announced('Deploying localhost ERC20 tokens', run.deployERC20('dev')); - await announced('Running server genesis setup', server.genesis_from_sources()); + await announced('Running server genesis setup', server.genesisFromSources()); await announced('Deploying L1 contracts', contract.redeployL1([])); + await announced('Initializing validator', contract.initializeValidator()); await announced('Initialize L1 allow list', contract.initializeL1AllowList()); await announced('Deploying L2 contracts', contract.deployL2()); } @@ -54,8 +56,9 @@ export async function reinit() { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); await announced('Building contracts', contract.build()); - await announced('Running server genesis setup', server.genesis_from_sources()); + await announced('Running server genesis setup', server.genesisFromSources()); await announced('Deploying L1 contracts', contract.redeployL1([])); + await announced('Initializing validator', contract.initializeValidator()); await announced('Initializing L1 Allow list', contract.initializeL1AllowList()); await announced('Deploying L2 contracts', contract.deployL2()); } @@ -64,8 +67,9 @@ export async function reinit() { export async function lightweightInit() { await announced('Clean rocksdb', clean('db')); await announced('Clean backups', clean('backups')); - await announced('Running server genesis setup', server.genesis_from_binary()); + await announced('Running server genesis setup', server.genesisFromBinary()); await announced('Deploying L1 contracts', contract.redeployL1([])); + await announced('Initializing validator', contract.initializeValidator()); await announced('Initializing L1 Allow list', contract.initializeL1AllowList()); 
await announced('Deploying L2 contracts', contract.deployL2()); } @@ -87,9 +91,9 @@ async function announced(fn: string, promise: Promise | void) { console.log(`${successLine} ${timestampLine}`); } -async function createVolumes() { - await utils.exec('mkdir -p $ZKSYNC_HOME/volumes/geth'); - await utils.exec('mkdir -p $ZKSYNC_HOME/volumes/postgres'); +function createVolumes() { + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/geth`, { recursive: true }); + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/postgres`, { recursive: true }); } async function submoduleUpdate() { diff --git a/infrastructure/zk/src/prover.ts b/infrastructure/zk/src/prover.ts deleted file mode 100644 index 82f96125bb78..000000000000 --- a/infrastructure/zk/src/prover.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { Command } from 'commander'; -import * as utils from './utils'; -import os from 'os'; - -export async function prover(totalProvers: number) { - let children: Promise[] = []; - - for (let id = 1; id <= totalProvers; id++) { - const name = `${os.hostname()}_${id}_blocks`; - console.log('Started prover', name); - const child = utils.spawn( - `cargo run --release --bin zksync_prover -- --worker_name=${name} plonk-step-by-step` - ); - children.push(child); - } - - await Promise.all(children); -} - -export const command = new Command('prover') - .description('run zksync prover') - .arguments('[number_of_provers]') - .action(async (provers?: string) => { - const totalProvers = provers ? 
parseInt(provers) : 1; - await prover(totalProvers); - }); diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index 79dfeb27401b..bd1735686674 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -27,6 +27,8 @@ async function create_genesis(cmd: string) { await utils.spawn(`${cmd} | tee genesis.log`); const genesisContents = fs.readFileSync('genesis.log').toString().split('\n'); const genesisBlockCommitment = genesisContents.find((line) => line.includes('CONTRACTS_GENESIS_BLOCK_COMMITMENT=')); + const genesisBootloaderHash = genesisContents.find((line) => line.includes('CHAIN_STATE_KEEPER_BOOTLOADER_HASH=')); + const genesisDefaultAAHash = genesisContents.find((line) => line.includes('CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=')); const genesisRoot = genesisContents.find((line) => line.includes('CONTRACTS_GENESIS_ROOT=')); const genesisRollupLeafIndex = genesisContents.find((line) => line.includes('CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX=') @@ -35,6 +37,20 @@ async function create_genesis(cmd: string) { throw Error(`Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty)`); } + if ( + genesisBootloaderHash == null || + !/^CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x[a-fA-F0-9]{64}$/.test(genesisBootloaderHash) + ) { + throw Error(`Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty)`); + } + + if ( + genesisDefaultAAHash == null || + !/^CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x[a-fA-F0-9]{64}$/.test(genesisDefaultAAHash) + ) { + throw Error(`Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty)`); + } + if ( genesisBlockCommitment == null || !/^CONTRACTS_GENESIS_BLOCK_COMMITMENT=0x[a-fA-F0-9]{64}$/.test(genesisBlockCommitment) @@ -62,18 +78,17 @@ async function create_genesis(cmd: string) { fs.mkdirSync(`logs/${label}`, { recursive: true }); fs.copyFileSync('genesis.log', `logs/${label}/genesis.log`); env.modify('CONTRACTS_GENESIS_ROOT', genesisRoot); + 
env.modify('CHAIN_STATE_KEEPER_BOOTLOADER_HASH', genesisBootloaderHash); + env.modify('CHAIN_STATE_KEEPER_DEFAULT_AA_HASH', genesisDefaultAAHash); env.modify('CONTRACTS_GENESIS_BLOCK_COMMITMENT', genesisBlockCommitment); env.modify('CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX', genesisRollupLeafIndex); - env.modify_contracts_toml('CONTRACTS_GENESIS_ROOT', genesisRoot); - env.modify_contracts_toml('CONTRACTS_GENESIS_BLOCK_COMMITMENT', genesisBlockCommitment); - env.modify_contracts_toml('CONTRACTS_GENESIS_ROLLUP_LEAF_INDEX', genesisRollupLeafIndex); } -export async function genesis_from_sources() { +export async function genesisFromSources() { await create_genesis('cargo run --bin zksync_server --release -- --genesis'); } -export async function genesis_from_binary() { +export async function genesisFromBinary() { await create_genesis('zksync_server --genesis'); } @@ -85,7 +100,7 @@ export const command = new Command('server') .option('--components ', 'comma-separated list of components to run') .action(async (cmd: Command) => { if (cmd.genesis) { - await genesis_from_sources(); + await genesisFromSources(); } else { await server(cmd.rebuildTree, cmd.openzeppelinTests, cmd.components); } diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts index 75285b6f2f17..096e3198cb2d 100644 --- a/infrastructure/zk/src/test/integration.ts +++ b/infrastructure/zk/src/test/integration.ts @@ -22,11 +22,20 @@ export async function server() { await utils.spawn('yarn ts-integration test'); } +export async function fees() { + await utils.spawn('yarn ts-integration fee-test'); +} + export async function revert(bail: boolean = false) { const flag = bail ? ' --bail' : ''; await utils.spawn('yarn revert-test revert-and-restart-test' + flag); } +export async function upgrade(bail: boolean = false) { + const flag = bail ? 
' --bail' : ''; + await utils.spawn('yarn upgrade-test upgrade-test' + flag); +} + export async function withdrawalHelpers() { await utils.spawn('yarn ts-tests withdrawal-helpers-test'); } @@ -98,6 +107,13 @@ command await server(); }); +command + .command('fees') + .description('run server integration tests') + .action(async () => { + await fees(); + }); + command .command('revert') .description('run revert test') @@ -106,6 +122,14 @@ command await revert(cmd.bail); }); +command + .command('upgrade') + .description('run upgrade test') + .option('--bail') + .action(async (cmd: Command) => { + await upgrade(cmd.bail); + }); + command .command('rust-sdk') .description('run rust SDK integration tests') diff --git a/package.json b/package.json index bfa80568d438..c4fdf76dcb19 100644 --- a/package.json +++ b/package.json @@ -15,6 +15,7 @@ "infrastructure/local-setup-preparation", "infrastructure/openzeppelin-tests-preparation", "core/tests/revert-test", + "core/tests/upgrade-test", "core/tests/ts-integration" ], "nohoist": [ @@ -30,6 +31,7 @@ "l1-contracts": "yarn workspace l1-zksync-contracts", "l2-contracts": "yarn workspace l2-zksync-contracts", "revert-test": "yarn workspace revert-test", + "upgrade-test": "yarn workspace upgrade-test", "ts-integration": "yarn workspace ts-integration", "zk": "yarn workspace zk", "reading-tool": "yarn workspace reading-tool", diff --git a/renovate.json b/renovate.json index 751c26d2767b..055bc3425806 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,11 @@ { - "extends": ["config:base", "helpers:pinGitHubActionDigests"], - "enabledManagers": ["github-actions"], + "enabled": false, + "extends": [ + "config:base", + "helpers:pinGitHubActionDigests" + ], + "enabledManagers": [ + "github-actions" + ], "prCreation": "immediate" } diff --git a/sdk/zksync-rs/CHANGELOG.md b/sdk/zksync-rs/CHANGELOG.md new file mode 100644 index 000000000000..dc1f16719e1f --- /dev/null +++ b/sdk/zksync-rs/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog + 
+## [0.4.0](https://github.com/matter-labs/zksync-2-dev/compare/zksync-v0.3.0...zksync-v0.4.0) (2023-03-23) + + +### âš  BREAKING CHANGES + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) + +### Features + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) ([d28e01c](https://github.com/matter-labs/zksync-2-dev/commit/d28e01ce0fbf0129c2cbba877efe65da7f7ed367)) + + +### Bug Fixes + +* Fix using L1 option parameters ([#1552](https://github.com/matter-labs/zksync-2-dev/issues/1552)) ([a769ca1](https://github.com/matter-labs/zksync-2-dev/commit/a769ca1cfd638b796ff99d30dc2530c2abae6074)) diff --git a/sdk/zksync-rs/Cargo.toml b/sdk/zksync-rs/Cargo.toml index 09c990ac4223..3827dc1c0273 100644 --- a/sdk/zksync-rs/Cargo.toml +++ b/sdk/zksync-rs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync" -version = "0.3.0" +version = "0.4.0" authors = ["The Matter Labs Team "] edition = "2018" diff --git a/sdk/zksync-rs/src/ethereum/mod.rs b/sdk/zksync-rs/src/ethereum/mod.rs index 045a28bdb1e4..4a1e5faa13e5 100644 --- a/sdk/zksync-rs/src/ethereum/mod.rs +++ b/sdk/zksync-rs/src/ethereum/mod.rs @@ -11,7 +11,7 @@ use zksync_types::{ transports::Http, types::{TransactionReceipt, H160, H256, U256}, }, - L1ChainId, U64, + L1ChainId, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U64, }; use zksync_web3_decl::namespaces::{EthNamespaceClient, ZksNamespaceClient}; @@ -426,7 +426,8 @@ impl EthereumProvider { tx_data, Options::with(|f| { f.gas = Some(U256::from(300000)); - f.value = Some(value) + f.value = Some(value); + f.gas_price = Some(gas_price) }), "zksync-rs", ) @@ -458,17 +459,6 @@ impl EthereumProvider { let is_eth_deposit = l1_token_address == Address::zero(); - let base_cost: U256 = U256::zero(); - - // Calculate the amount of ether to be sent in the transaction. 
- let total_value = if is_eth_deposit { - // Both fee component and the deposit amount are represented as `msg.value`. - base_cost + operator_tip + amount - } else { - // ERC20 token, `msg.value` is used only for the fee. - base_cost + operator_tip - }; - // Calculate the gas limit for transaction: it may vary for different tokens. let gas_limit = if is_eth_deposit { 200_000u64 @@ -489,10 +479,48 @@ impl EthereumProvider { }; let mut options = eth_options.unwrap_or_default(); + + // If the user has already provided max_fee_per_gas or gas_price, we will use + // it to calculate the base cost for the transaction + let gas_price = if let Some(max_fee_per_gas) = options.max_fee_per_gas { + max_fee_per_gas + } else if let Some(gas_price) = options.gas_price { + gas_price + } else { + let gas_price = self + .eth_client + .get_gas_price("zksync-rs") + .await + .map_err(|e| ClientError::NetworkError(e.to_string()))?; + + options.gas_price = Some(gas_price); + + gas_price + }; + + let l2_gas_limit = U256::from(3_000_000u32); + + let base_cost: U256 = self + .base_cost( + l2_gas_limit, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE as u32, + Some(gas_price), + ) + .await + .map_err(|e| ClientError::NetworkError(e.to_string()))?; + + // Calculate the amount of ether to be sent in the transaction. + let total_value = if is_eth_deposit { + // Both fee component and the deposit amount are represented as `msg.value`. + base_cost + operator_tip + amount + } else { + // ERC20 token, `msg.value` is used only for the fee. + base_cost + operator_tip + }; + options.value = Some(total_value); options.gas = Some(gas_limit.into()); - let l2_gas_limit = U256::from(10000000u32); let transaction_hash = if is_eth_deposit { self.request_execute( to, @@ -501,7 +529,7 @@ impl EthereumProvider { l2_gas_limit, None, None, - None, + Some(gas_price), Default::default(), ) .await? 
diff --git a/sdk/zksync-web3.js/CHANGELOG.md b/sdk/zksync-web3.js/CHANGELOG.md new file mode 100644 index 000000000000..e9d1fbeb8d96 --- /dev/null +++ b/sdk/zksync-web3.js/CHANGELOG.md @@ -0,0 +1,19 @@ +# Changelog + +## [0.14.0](https://github.com/matter-labs/zksync-2-dev/compare/zksync-web3-v0.13.3...zksync-web3-v0.14.0) (2023-03-21) + + +### âš  BREAKING CHANGES + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) + +### Features + +* **contracts:** M6 batch of breaking changes ([#1482](https://github.com/matter-labs/zksync-2-dev/issues/1482)) ([d28e01c](https://github.com/matter-labs/zksync-2-dev/commit/d28e01ce0fbf0129c2cbba877efe65da7f7ed367)) +* Make server compatible with new SDK ([#1532](https://github.com/matter-labs/zksync-2-dev/issues/1532)) ([1c52738](https://github.com/matter-labs/zksync-2-dev/commit/1c527382d1e36c04df90bdf71fe643db724acb48)) +* **SDK:** Use old ABI ([#1558](https://github.com/matter-labs/zksync-2-dev/issues/1558)) ([293882f](https://github.com/matter-labs/zksync-2-dev/commit/293882f2b20c95891ecfc4b72720c82e03babc7e)) + + +### Bug Fixes + +* **sdk:** Fix address overflow when applying l2tol1 alias ([#1527](https://github.com/matter-labs/zksync-2-dev/issues/1527)) ([8509b20](https://github.com/matter-labs/zksync-2-dev/commit/8509b20854fcb2a45ea8d1350b3f2904d99eda93)) diff --git a/sdk/zksync-web3.js/abi/ContractDeployer.json b/sdk/zksync-web3.js/abi/ContractDeployer.json index 55dfa544b8fb..15dfc3d387d8 100644 --- a/sdk/zksync-web3.js/abi/ContractDeployer.json +++ b/sdk/zksync-web3.js/abi/ContractDeployer.json @@ -1,5 +1,43 @@ { "abi": [ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "accountAddress", + "type": "address" + }, + { + "indexed": false, + "internalType": "enum IContractDeployer.AccountNonceOrdering", + "name": "nonceOrdering", + "type": "uint8" + } + ], + "name": "AccountNonceOrderingUpdated", + "type": 
"event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "accountAddress", + "type": "address" + }, + { + "indexed": false, + "internalType": "enum IContractDeployer.AccountAbstractionVersion", + "name": "aaVersion", + "type": "uint8" + } + ], + "name": "AccountVersionUpdated", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -29,7 +67,7 @@ "inputs": [ { "internalType": "bytes32", - "name": "", + "name": "_salt", "type": "bytes32" }, { @@ -341,7 +379,7 @@ "type": "address" } ], - "stateMutability": "pure", + "stateMutability": "view", "type": "function" }, { diff --git a/sdk/zksync-web3.js/abi/IAllowList.json b/sdk/zksync-web3.js/abi/IAllowList.json index 21f49da47619..abc5a1481a64 100644 --- a/sdk/zksync-web3.js/abi/IAllowList.json +++ b/sdk/zksync-web3.js/abi/IAllowList.json @@ -135,37 +135,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "address", - "name": "_l1Token", - "type": "address" - } - ], - "name": "getTokenWithdrawalLimitData", - "outputs": [ - { - "components": [ - { - "internalType": "bool", - "name": "withdrawalLimitation", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "withdrawalFactor", - "type": "uint256" - } - ], - "internalType": "struct IAllowList.Withdrawal", - "name": "", - "type": "tuple" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [ { @@ -309,29 +278,6 @@ "outputs": [], "stateMutability": "nonpayable", "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_l1Token", - "type": "address" - }, - { - "internalType": "bool", - "name": "_withdrawalLimitation", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "_withdrawalFactor", - "type": "uint256" - } - ], - "name": "setWithdrawalLimit", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" } ] } diff --git a/sdk/zksync-web3.js/abi/IEthToken.json 
b/sdk/zksync-web3.js/abi/IEthToken.json index 2c54afcbe009..f9bb26498d67 100644 --- a/sdk/zksync-web3.js/abi/IEthToken.json +++ b/sdk/zksync-web3.js/abi/IEthToken.json @@ -47,6 +47,12 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "_l2Sender", + "type": "address" + }, { "indexed": true, "internalType": "address", @@ -66,9 +72,9 @@ { "inputs": [ { - "internalType": "address", + "internalType": "uint256", "name": "", - "type": "address" + "type": "uint256" } ], "name": "balanceOf", diff --git a/sdk/zksync-web3.js/abi/IZkSync.json b/sdk/zksync-web3.js/abi/IZkSync.json index c31cd6a2aad2..0196291d0185 100644 --- a/sdk/zksync-web3.js/abi/IZkSync.json +++ b/sdk/zksync-web3.js/abi/IZkSync.json @@ -1825,152 +1825,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "_txId", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_l2Value", - "type": "uint256" - }, - { - "internalType": "address", - "name": "_sender", - "type": "address" - }, - { - "internalType": "address", - "name": "_contractAddressL2", - "type": "address" - }, - { - "internalType": "bytes", - "name": "_calldata", - "type": "bytes" - }, - { - "internalType": "uint256", - "name": "_l2GasLimit", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_l2GasPerPubdataByteLimit", - "type": "uint256" - }, - { - "internalType": "bytes[]", - "name": "_factoryDeps", - "type": "bytes[]" - }, - { - "internalType": "uint256", - "name": "_toMint", - "type": "uint256" - }, - { - "internalType": "address", - "name": "_refundRecipient", - "type": "address" - } - ], - "name": "serializeL2Transaction", - "outputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "txType", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "from", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "to", - "type": "uint256" - }, - { - 
"internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasPerPubdataByteLimit", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "maxFeePerGas", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "maxPriorityFeePerGas", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "paymaster", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "nonce", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "internalType": "uint256[4]", - "name": "reserved", - "type": "uint256[4]" - }, - { - "internalType": "bytes", - "name": "data", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "signature", - "type": "bytes" - }, - { - "internalType": "uint256[]", - "name": "factoryDeps", - "type": "uint256[]" - }, - { - "internalType": "bytes", - "name": "paymasterInput", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "reservedDynamic", - "type": "bytes" - } - ], - "internalType": "struct IMailbox.L2CanonicalTransaction", - "name": "", - "type": "tuple" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [ { diff --git a/sdk/zksync-web3.js/abi/update-abi.sh b/sdk/zksync-web3.js/abi/update-abi.sh index c43a800a7834..5961ae3485b5 100755 --- a/sdk/zksync-web3.js/abi/update-abi.sh +++ b/sdk/zksync-web3.js/abi/update-abi.sh @@ -17,4 +17,5 @@ cat $ZKSYNC_CONTRACTS/bridge/interfaces/IL2Bridge.sol/IL2Bridge.json | jq '{ abi cat $ZKSYNC_CONTRACTS/interfaces/IPaymasterFlow.sol/IPaymasterFlow.json | jq '{ abi: .abi}' > IPaymasterFlow.json cat $SYSTEM_CONTRACTS/interfaces/IL1Messenger.sol/IL1Messenger.json | jq '{ abi: .abi}' > IL1Messenger.json +cat $SYSTEM_CONTRACTS/interfaces/IEthToken.sol/IEthToken.json | jq '{ abi: .abi}' > IEthToken.json cat $SYSTEM_CONTRACTS/ContractDeployer.sol/ContractDeployer.json | jq '{ abi: .abi}' > ContractDeployer.json diff --git 
a/sdk/zksync-web3.js/package.json b/sdk/zksync-web3.js/package.json index 1e366498f318..619c45bf676e 100644 --- a/sdk/zksync-web3.js/package.json +++ b/sdk/zksync-web3.js/package.json @@ -1,6 +1,6 @@ { "name": "zksync-web3", - "version": "0.13.0", + "version": "0.14.1", "main": "build/src/index.js", "types": "build/src/index.d.ts", "files": [ diff --git a/sdk/zksync-web3.js/src/adapters.ts b/sdk/zksync-web3.js/src/adapters.ts index f92ef7df1ef1..c8b3db64f93b 100644 --- a/sdk/zksync-web3.js/src/adapters.ts +++ b/sdk/zksync-web3.js/src/adapters.ts @@ -1,20 +1,19 @@ -import { BigNumber, BigNumberish, ethers, BytesLike } from 'ethers'; +import { BigNumber, BigNumberish, BytesLike, ethers } from 'ethers'; +import { IERC20MetadataFactory, IL1BridgeFactory, IL2BridgeFactory, IZkSyncFactory } from '../typechain'; import { Provider } from './provider'; +import { Address, BalancesMap, BlockTag, Eip712Meta, PriorityOpResponse, TransactionResponse } from './types'; import { - RECOMMENDED_GAS_LIMIT, - isETH, - ETH_ADDRESS, + BOOTLOADER_FORMAL_ADDRESS, checkBaseCost, - undoL1ToL2Alias, - layer1TxDefaults, DEFAULT_GAS_PER_PUBDATA_LIMIT, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, + ETH_ADDRESS, + isETH, L1_MESSENGER_ADDRESS, - BOOTLOADER_FORMAL_ADDRESS, - RECOMMENDED_DEPOSIT_L2_GAS_LIMIT, - DEPOSIT_GAS_PER_PUBDATA_LIMIT + layer1TxDefaults, + undoL1ToL2Alias, + estimateDefaultBridgeDepositL2Gas } from './utils'; -import { IZkSyncFactory, IL1BridgeFactory, IL2BridgeFactory, IERC20MetadataFactory } from '../typechain'; -import { Address, PriorityOpResponse, BlockTag, Eip712Meta, TransactionResponse, BalancesMap } from './types'; type Constructor = new (...args: any[]) => T; @@ -84,22 +83,7 @@ export function AdapterL1>(Base: TBase) { delete overrides.bridgeAddress; } - let gasLimit: BigNumberish; - if (overrides?.gasLimit) { - gasLimit = await overrides.gasLimit; - } else { - // For some reason, gas estimation for approves may be imprecise. - // At least in the localhost scenario. 
- gasLimit = await erc20contract.estimateGas.approve(bridgeAddress, amount); - gasLimit = gasLimit.gt(RECOMMENDED_GAS_LIMIT.ERC20_APPROVE) - ? gasLimit - : RECOMMENDED_GAS_LIMIT.ERC20_APPROVE; - } - - return await erc20contract.approve(bridgeAddress, amount, { - gasLimit, - ...overrides - }); + return await erc20contract.approve(bridgeAddress, amount, overrides); } async getBaseCost(params: { @@ -110,7 +94,7 @@ export function AdapterL1>(Base: TBase) { const zksyncContract = await this.getMainContract(); const parameters = { ...layer1TxDefaults(), ...params }; parameters.gasPrice ??= await this._providerL1().getGasPrice(); - parameters.gasPerPubdataByte ??= DEPOSIT_GAS_PER_PUBDATA_LIMIT; + parameters.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; return BigNumber.from( await zksyncContract.l2TransactionBaseCost( @@ -133,6 +117,52 @@ export function AdapterL1>(Base: TBase) { overrides?: ethers.PayableOverrides; approveOverrides?: ethers.Overrides; }): Promise { + const depositTx = await this.getDepositTx(transaction); + if (transaction.token == ETH_ADDRESS) { + return this.requestExecute(depositTx); + } else { + const bridgeContracts = await this.getL1BridgeContracts(); + if (transaction.approveERC20) { + const approveTx = await this.approveERC20(transaction.token, transaction.amount, { + bridgeAddress: transaction.bridgeAddress ?? 
bridgeContracts.erc20.address, + ...transaction.approveOverrides + }); + await approveTx.wait(); + } + return await this._providerL2().getPriorityOpResponse( + await this._signerL1().sendTransaction(depositTx) + ); + } + } + + async estimateGasDeposit(transaction: { + token: Address; + amount: BigNumberish; + to?: Address; + operatorTip?: BigNumberish; + bridgeAddress?: Address; + l2GasLimit?: BigNumberish; + gasPerPubdataByte?: BigNumberish; + overrides?: ethers.PayableOverrides; + }): Promise { + const depositTx = await this.getDepositTx(transaction); + if (transaction.token == ETH_ADDRESS) { + return await this.estimateGasRequestExecute(depositTx); + } else { + return await this._providerL1().estimateGas(depositTx); + } + } + + async getDepositTx(transaction: { + token: Address; + amount: BigNumberish; + to?: Address; + operatorTip?: BigNumberish; + bridgeAddress?: Address; + l2GasLimit?: BigNumberish; + gasPerPubdataByte?: BigNumberish; + overrides?: ethers.PayableOverrides; + }): Promise { const bridgeContracts = await this.getL1BridgeContracts(); if (transaction.bridgeAddress) { bridgeContracts.erc20.attach(transaction.bridgeAddress); @@ -142,59 +172,53 @@ export function AdapterL1>(Base: TBase) { tx.to ??= await this.getAddress(); tx.operatorTip ??= BigNumber.from(0); tx.overrides ??= {}; - tx.gasPerPubdataByte ??= DEPOSIT_GAS_PER_PUBDATA_LIMIT; - tx.l2GasLimit ??= BigNumber.from(RECOMMENDED_DEPOSIT_L2_GAS_LIMIT); + tx.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; + tx.l2GasLimit ??= await estimateDefaultBridgeDepositL2Gas( + this._providerL1(), + this._providerL2(), + tx.token, + tx.amount, + tx.to, + await this.getAddress(), + tx.gasPerPubdataByte + ); const { to, token, amount, operatorTip, overrides } = tx; overrides.gasPrice ??= await this._providerL1().getGasPrice(); - overrides.gasLimit ??= BigNumber.from(RECOMMENDED_GAS_LIMIT.DEPOSIT); - const baseCost = BigNumber.from(0); + const zksyncContract = await this.getMainContract(); - 
const args: [Address, Address, BigNumberish, BigNumberish, BigNumberish] = [ - to, - token, - amount, + const baseCost = await zksyncContract.l2TransactionBaseCost( + await overrides.gasPrice, tx.l2GasLimit, tx.gasPerPubdataByte - ]; + ); if (token == ETH_ADDRESS) { overrides.value ??= baseCost.add(operatorTip).add(amount); - return await this.requestExecute({ + return { contractAddress: to, calldata: '0x', l2Value: amount, // For some reason typescript can not deduce that we've already set the - // tx.gasLimit + // tx.l2GasLimit l2GasLimit: tx.l2GasLimit!, ...tx - }); + }; } else { - overrides.value ??= baseCost.add(operatorTip); + const args: [Address, Address, BigNumberish, BigNumberish, BigNumberish] = [ + to, + token, + amount, + tx.l2GasLimit, + tx.gasPerPubdataByte + ]; + overrides.value ??= baseCost.add(operatorTip); await checkBaseCost(baseCost, overrides.value); - if (transaction.approveERC20) { - const approveTx = await this.approveERC20(token, amount, { - bridgeAddress: bridgeContracts.erc20.address, - ...transaction.approveOverrides - }); - overrides.nonce ??= approveTx.nonce + 1; - } - - if (overrides.gasLimit == null) { - const gasEstimate = await bridgeContracts.erc20.estimateGas - .deposit(...args, overrides) - .catch(() => BigNumber.from(0)); - const recommendedGasLimit = RECOMMENDED_GAS_LIMIT.DEPOSIT; - overrides.gasLimit = gasEstimate.gte(recommendedGasLimit) ? 
gasEstimate : recommendedGasLimit; - } - - return await this._providerL2().getPriorityOpResponse( - await bridgeContracts.erc20.deposit(...args, overrides) - ); + return await bridgeContracts.erc20.populateTransaction.deposit(...args, overrides); } } @@ -227,23 +251,36 @@ export function AdapterL1>(Base: TBase) { }; } - async finalizeWithdrawal(withdrawalHash: BytesLike, index: number = 0, overrides?: ethers.Overrides) { + async finalizeWithdrawalParams(withdrawalHash: BytesLike, index: number = 0) { const { log, l1BatchTxId } = await this._getWithdrawalLog(withdrawalHash, index); const { l2ToL1LogIndex } = await this._getWithdrawalL2ToL1Log(withdrawalHash, index); const sender = ethers.utils.hexDataSlice(log.topics[1], 12); const proof = await this._providerL2().getLogProof(withdrawalHash, l2ToL1LogIndex); const message = ethers.utils.defaultAbiCoder.decode(['bytes'], log.data)[0]; + return { + l1BatchNumber: log.l1BatchNumber, + l2MessageIndex: proof.id, + l2TxNumberInBlock: l1BatchTxId, + message, + sender, + proof: proof.proof + }; + } + + async finalizeWithdrawal(withdrawalHash: BytesLike, index: number = 0, overrides?: ethers.Overrides) { + const { l1BatchNumber, l2MessageIndex, l2TxNumberInBlock, message, sender, proof } = + await this.finalizeWithdrawalParams(withdrawalHash, index); if (isETH(sender)) { const contractAddress = await this._providerL2().getMainContractAddress(); const zksync = IZkSyncFactory.connect(contractAddress, this._signerL1()); return await zksync.finalizeEthWithdrawal( - log.l1BatchNumber, - proof.id, - l1BatchTxId, + l1BatchNumber, + l2MessageIndex, + l2TxNumberInBlock, message, - proof.proof, + proof, overrides ?? 
{} ); } @@ -251,11 +288,11 @@ export function AdapterL1>(Base: TBase) { const l2Bridge = IL2BridgeFactory.connect(sender, this._providerL2()); const l1Bridge = IL1BridgeFactory.connect(await l2Bridge.l1Bridge(), this._signerL1()); return await l1Bridge.finalizeWithdrawal( - log.l1BatchNumber, - proof.id, - l1BatchTxId, + l1BatchNumber, + l2MessageIndex, + l2TxNumberInBlock, message, - proof.proof, + proof, overrides ?? {} ); } @@ -319,7 +356,7 @@ export function AdapterL1>(Base: TBase) { async requestExecute(transaction: { contractAddress: Address; calldata: BytesLike; - l2GasLimit: BigNumberish; + l2GasLimit?: BigNumberish; l2Value?: BigNumberish; factoryDeps?: ethers.BytesLike[]; operatorTip?: BigNumberish; @@ -327,6 +364,36 @@ export function AdapterL1>(Base: TBase) { refundRecipient?: Address; overrides?: ethers.PayableOverrides; }): Promise { + const requestExecuteTx = await this.getRequestExecuteTx(transaction); + return this._providerL2().getPriorityOpResponse(await this._signerL1().sendTransaction(requestExecuteTx)); + } + + async estimateGasRequestExecute(transaction: { + contractAddress: Address; + calldata: BytesLike; + l2GasLimit?: BigNumberish; + l2Value?: BigNumberish; + factoryDeps?: ethers.BytesLike[]; + operatorTip?: BigNumberish; + gasPerPubdataByte?: BigNumberish; + refundRecipient?: Address; + overrides?: ethers.PayableOverrides; + }): Promise { + const requestExecuteTx = await this.getRequestExecuteTx(transaction); + return this._providerL1().estimateGas(requestExecuteTx); + } + + async getRequestExecuteTx(transaction: { + contractAddress: Address; + calldata: BytesLike; + l2GasLimit?: BigNumberish; + l2Value?: BigNumberish; + factoryDeps?: ethers.BytesLike[]; + operatorTip?: BigNumberish; + gasPerPubdataByte?: BigNumberish; + refundRecipient?: Address; + overrides?: ethers.PayableOverrides; + }): Promise { const zksyncContract = await this.getMainContract(); const { ...tx } = transaction; @@ -334,8 +401,9 @@ export function AdapterL1>(Base: 
TBase) { tx.operatorTip ??= BigNumber.from(0); tx.factoryDeps ??= []; tx.overrides ??= {}; - tx.gasPerPubdataByte ??= DEPOSIT_GAS_PER_PUBDATA_LIMIT; + tx.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; tx.refundRecipient ??= await this.getAddress(); + tx.l2GasLimit ??= await this._providerL2().estimateL1ToL2Execute(transaction); const { contractAddress, @@ -349,7 +417,6 @@ export function AdapterL1>(Base: TBase) { refundRecipient } = tx; overrides.gasPrice ??= await this._providerL1().getGasPrice(); - overrides.gasLimit ??= BigNumber.from(RECOMMENDED_GAS_LIMIT.EXECUTE); const baseCost = await this.getBaseCost({ gasPrice: await overrides.gasPrice, @@ -361,17 +428,15 @@ export function AdapterL1>(Base: TBase) { await checkBaseCost(baseCost, overrides.value); - return this._providerL2().getPriorityOpResponse( - await zksyncContract.requestL2Transaction( - contractAddress, - l2Value, - calldata, - l2GasLimit, - DEPOSIT_GAS_PER_PUBDATA_LIMIT, - factoryDeps, - refundRecipient, - overrides - ) + return await zksyncContract.populateTransaction.requestL2Transaction( + contractAddress, + l2Value, + calldata, + l2GasLimit, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT, + factoryDeps, + refundRecipient, + overrides ); } }; diff --git a/sdk/zksync-web3.js/src/provider.ts b/sdk/zksync-web3.js/src/provider.ts index 2982a74e9e41..fd6ce694f18c 100644 --- a/sdk/zksync-web3.js/src/provider.ts +++ b/sdk/zksync-web3.js/src/provider.ts @@ -31,7 +31,8 @@ import { ETH_ADDRESS, parseTransaction, sleep, - L2_ETH_TOKEN_ADDRESS + L2_ETH_TOKEN_ADDRESS, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT } from './utils'; import { Signer } from './signer'; @@ -242,6 +243,25 @@ export class Provider extends ethers.providers.JsonRpcProvider { } } + async estimateGasL1(transaction: utils.Deferrable): Promise { + await this.getNetwork(); + const params = await utils.resolveProperties({ + transaction: this._getTransactionRequest(transaction) + }); + if (transaction.customData != null) { + // 
@ts-ignore + params.transaction.customData = transaction.customData; + } + const result = await this.send('zks_estimateGasL1ToL2', [ + Provider.hexlifyTransaction(params.transaction, { from: true }) + ]); + try { + return BigNumber.from(result); + } catch (error) { + throw new Error(`bad result from backend (zks_estimateGasL1ToL2): ${result}`); + } + } + override async getGasPrice(token?: Address): Promise { const params = token ? [token] : []; const price = await this.send('eth_gasPrice', params); @@ -566,6 +586,40 @@ export class Provider extends ethers.providers.JsonRpcProvider { nonceOrdering: data.nonceOrdering }; } + + async estimateL1ToL2Execute(transaction: { + contractAddress: Address; + calldata: BytesLike; + caller?: Address; + l2Value?: BigNumberish; + factoryDeps?: ethers.BytesLike[]; + gasPerPubdataByte?: BigNumberish; + overrides?: ethers.PayableOverrides; + }): Promise { + transaction.gasPerPubdataByte ??= REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT; + + // If the `from` address is not provided, we use a random address, because + // due to storage slot aggregation, the gas estimation will depend on the address + // and so estimation for the zero address may be smaller than for the sender. 
+ transaction.caller ??= ethers.Wallet.createRandom().address; + + const customData = { + gasPerPubdataByte: transaction.gasPerPubdataByte + }; + if (transaction.factoryDeps) { + Object.assign(customData, { factoryDeps: transaction.factoryDeps }); + } + + const fee = await this.estimateGasL1({ + from: transaction.caller, + data: transaction.calldata, + to: transaction.contractAddress, + value: transaction.l2Value, + customData + }); + + return fee; + } } export class Web3Provider extends Provider { diff --git a/sdk/zksync-web3.js/src/utils.ts b/sdk/zksync-web3.js/src/utils.ts index b857ae9ba2b0..599aecedb1e6 100644 --- a/sdk/zksync-web3.js/src/utils.ts +++ b/sdk/zksync-web3.js/src/utils.ts @@ -12,6 +12,8 @@ import { import { TypedDataDomain, TypedDataField } from '@ethersproject/abstract-signer'; import { Provider } from './provider'; import { EIP712Signer } from './signer'; +import { IERC20MetadataFactory } from '../typechain'; +import { AbiCoder } from 'ethers/lib/utils'; export * from './paymaster-utils'; @@ -47,16 +49,7 @@ export const DEFAULT_GAS_PER_PUBDATA_LIMIT = 50000; // It is possible to provide practically any gasPerPubdataByte for L1->L2 transactions, since // the cost per gas will be adjusted respectively. We will use 800 as an relatively optimal value for now. -export const DEPOSIT_GAS_PER_PUBDATA_LIMIT = 800; - -// The recommended L2 gas limit for a deposit. 
-export const RECOMMENDED_DEPOSIT_L2_GAS_LIMIT = 10000000; - -export const RECOMMENDED_GAS_LIMIT = { - DEPOSIT: 600_000, - EXECUTE: 620_000, - ERC20_APPROVE: 50_000 -}; +export const REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_LIMIT = 800; export function isETH(token: Address) { return token.toLowerCase() == ETH_ADDRESS || token.toLowerCase() == L2_ETH_TOKEN_ADDRESS; @@ -376,12 +369,55 @@ export function getL2HashFromPriorityOp( return txHash; } +const ADDRESS_MODULO = BigNumber.from(2).pow(160); + export function applyL1ToL2Alias(address: string): string { - return ethers.utils.hexlify(ethers.BigNumber.from(address).add(L1_TO_L2_ALIAS_OFFSET)); + return ethers.utils.hexlify(ethers.BigNumber.from(address).add(L1_TO_L2_ALIAS_OFFSET).mod(ADDRESS_MODULO)); } export function undoL1ToL2Alias(address: string): string { - return ethers.utils.hexlify(ethers.BigNumber.from(address).sub(L1_TO_L2_ALIAS_OFFSET)); + let result = ethers.BigNumber.from(address).sub(L1_TO_L2_ALIAS_OFFSET); + if (result.lt(BigNumber.from(0))) { + result = result.add(ADDRESS_MODULO); + } + + return ethers.utils.hexlify(result); +} + +/// Getters data used to correctly initialize the L1 token counterpart on L2 +async function getERC20GettersData(l1TokenAddress: string, provider: ethers.providers.Provider): Promise { + const token = IERC20MetadataFactory.connect(l1TokenAddress, provider); + + const name = await token.name(); + const symbol = await token.symbol(); + const decimals = await token.decimals(); + + const coder = new AbiCoder(); + + const nameBytes = coder.encode(['string'], [name]); + const symbolBytes = coder.encode(['string'], [symbol]); + const decimalsBytes = coder.encode(['uint256'], [decimals]); + + return coder.encode(['bytes', 'bytes', 'bytes'], [nameBytes, symbolBytes, decimalsBytes]); +} + +/// The method that returns the calldata that will be sent by an L1 ERC20 bridge to its L2 counterpart +/// during bridging of a token. 
+export async function getERC20BridgeCalldata( + l1TokenAddress: string, + l1Sender: string, + l2Receiver: string, + amount: BigNumberish, + provider: ethers.providers.Provider +): Promise { + const gettersData = await getERC20GettersData(l1TokenAddress, provider); + return L2_BRIDGE_ABI.encodeFunctionData('finalizeDeposit', [ + l1Sender, + l2Receiver, + l1TokenAddress, + amount, + gettersData + ]); } // The method with similar functionality is already available in ethers.js, @@ -461,3 +497,40 @@ export async function isTypedDataSignatureCorrect( const msgHash = ethers.utils._TypedDataEncoder.hash(domain, types, value); return await isSignatureCorrect(provider, address, msgHash, signature); } + +export async function estimateDefaultBridgeDepositL2Gas( + providerL1: ethers.providers.Provider, + providerL2: Provider, + token: Address, + amount: BigNumberish, + to: Address, + from?: Address, + gasPerPubdataByte?: BigNumberish +): Promise { + // If the `from` address is not provided, we use a random address, because + // due to storage slot aggregation, the gas estimation will depend on the address + // and so estimation for the zero address may be smaller than for the sender. 
+ from ??= ethers.Wallet.createRandom().address; + + if (token == ETH_ADDRESS) { + return await providerL2.estimateL1ToL2Execute({ + contractAddress: to, + gasPerPubdataByte: gasPerPubdataByte, + caller: from, + calldata: '0x', + l2Value: amount + }); + } else { + const l1ERC20BridgeAddresses = (await providerL2.getDefaultBridgeAddresses()).erc20L1; + const erc20BridgeAddress = (await providerL2.getDefaultBridgeAddresses()).erc20L2; + + const calldata = await getERC20BridgeCalldata(token, from, to, amount, providerL1); + + return await providerL2.estimateL1ToL2Execute({ + caller: applyL1ToL2Alias(l1ERC20BridgeAddresses), + contractAddress: erc20BridgeAddress, + gasPerPubdataByte: gasPerPubdataByte, + calldata: calldata + }); + } +} diff --git a/sdk/zksync-web3.js/typechain/IAllowList.d.ts b/sdk/zksync-web3.js/typechain/IAllowList.d.ts index 44150193a204..c7e4e1e3dbc2 100644 --- a/sdk/zksync-web3.js/typechain/IAllowList.d.ts +++ b/sdk/zksync-web3.js/typechain/IAllowList.d.ts @@ -22,111 +22,95 @@ import { FunctionFragment, EventFragment, Result } from "@ethersproject/abi"; interface IAllowListInterface extends ethers.utils.Interface { functions: { - "acceptOwner()": FunctionFragment; "canCall(address,address,bytes4)": FunctionFragment; + "getAccessMode(address)": FunctionFragment; + "getTokenDepositLimitData(address)": FunctionFragment; "hasSpecialAccessToCall(address,address,bytes4)": FunctionFragment; - "isAccessPublic(address)": FunctionFragment; - "owner()": FunctionFragment; - "pendingOwner()": FunctionFragment; + "setAccessMode(address,uint8)": FunctionFragment; + "setBatchAccessMode(address[],uint8[])": FunctionFragment; "setBatchPermissionToCall(address[],address[],bytes4[],bool[])": FunctionFragment; - "setBatchPublicAccess(address[],bool[])": FunctionFragment; - "setPendingOwner(address)": FunctionFragment; + "setDepositLimit(address,bool,uint256)": FunctionFragment; "setPermissionToCall(address,address,bytes4,bool)": FunctionFragment; - 
"setPublicAccess(address,bool)": FunctionFragment; }; - encodeFunctionData( - functionFragment: "acceptOwner", - values?: undefined - ): string; encodeFunctionData( functionFragment: "canCall", values: [string, string, BytesLike] ): string; + encodeFunctionData( + functionFragment: "getAccessMode", + values: [string] + ): string; + encodeFunctionData( + functionFragment: "getTokenDepositLimitData", + values: [string] + ): string; encodeFunctionData( functionFragment: "hasSpecialAccessToCall", values: [string, string, BytesLike] ): string; encodeFunctionData( - functionFragment: "isAccessPublic", - values: [string] + functionFragment: "setAccessMode", + values: [string, BigNumberish] ): string; - encodeFunctionData(functionFragment: "owner", values?: undefined): string; encodeFunctionData( - functionFragment: "pendingOwner", - values?: undefined + functionFragment: "setBatchAccessMode", + values: [string[], BigNumberish[]] ): string; encodeFunctionData( functionFragment: "setBatchPermissionToCall", values: [string[], string[], BytesLike[], boolean[]] ): string; encodeFunctionData( - functionFragment: "setBatchPublicAccess", - values: [string[], boolean[]] - ): string; - encodeFunctionData( - functionFragment: "setPendingOwner", - values: [string] + functionFragment: "setDepositLimit", + values: [string, boolean, BigNumberish] ): string; encodeFunctionData( functionFragment: "setPermissionToCall", values: [string, string, BytesLike, boolean] ): string; - encodeFunctionData( - functionFragment: "setPublicAccess", - values: [string, boolean] - ): string; - decodeFunctionResult( - functionFragment: "acceptOwner", - data: BytesLike - ): Result; decodeFunctionResult(functionFragment: "canCall", data: BytesLike): Result; decodeFunctionResult( - functionFragment: "hasSpecialAccessToCall", + functionFragment: "getAccessMode", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "isAccessPublic", + functionFragment: "getTokenDepositLimitData", data: BytesLike 
): Result; - decodeFunctionResult(functionFragment: "owner", data: BytesLike): Result; decodeFunctionResult( - functionFragment: "pendingOwner", + functionFragment: "hasSpecialAccessToCall", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "setBatchPermissionToCall", + functionFragment: "setAccessMode", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "setBatchPublicAccess", + functionFragment: "setBatchAccessMode", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "setPendingOwner", + functionFragment: "setBatchPermissionToCall", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "setPermissionToCall", + functionFragment: "setDepositLimit", data: BytesLike ): Result; decodeFunctionResult( - functionFragment: "setPublicAccess", + functionFragment: "setPermissionToCall", data: BytesLike ): Result; events: { - "NewOwner(address)": EventFragment; - "NewPendingOwner(address,address)": EventFragment; + "UpdateAccessMode(address,uint8,uint8)": EventFragment; "UpdateCallPermission(address,address,bytes4,bool)": EventFragment; - "UpdatePublicAccess(address,bool)": EventFragment; }; - getEvent(nameOrSignatureOrTopic: "NewOwner"): EventFragment; - getEvent(nameOrSignatureOrTopic: "NewPendingOwner"): EventFragment; + getEvent(nameOrSignatureOrTopic: "UpdateAccessMode"): EventFragment; getEvent(nameOrSignatureOrTopic: "UpdateCallPermission"): EventFragment; - getEvent(nameOrSignatureOrTopic: "UpdatePublicAccess"): EventFragment; } export class IAllowList extends Contract { @@ -143,10 +127,6 @@ export class IAllowList extends Contract { interface: IAllowListInterface; functions: { - acceptOwner(overrides?: Overrides): Promise; - - "acceptOwner()"(overrides?: Overrides): Promise; - canCall( _caller: string, _target: string, @@ -165,89 +145,113 @@ export class IAllowList extends Contract { 0: boolean; }>; - hasSpecialAccessToCall( - _caller: string, + getAccessMode( _target: string, - _functionSig: 
BytesLike, overrides?: CallOverrides ): Promise<{ - 0: boolean; + 0: number; }>; - "hasSpecialAccessToCall(address,address,bytes4)"( - _caller: string, + "getAccessMode(address)"( _target: string, - _functionSig: BytesLike, overrides?: CallOverrides ): Promise<{ - 0: boolean; + 0: number; }>; - isAccessPublic( - _target: string, + getTokenDepositLimitData( + _l1Token: string, overrides?: CallOverrides ): Promise<{ - 0: boolean; + 0: { + depositLimitation: boolean; + depositCap: BigNumber; + 0: boolean; + 1: BigNumber; + }; }>; - "isAccessPublic(address)"( - _target: string, + "getTokenDepositLimitData(address)"( + _l1Token: string, overrides?: CallOverrides ): Promise<{ - 0: boolean; + 0: { + depositLimitation: boolean; + depositCap: BigNumber; + 0: boolean; + 1: BigNumber; + }; }>; - owner(overrides?: CallOverrides): Promise<{ - 0: string; + hasSpecialAccessToCall( + _caller: string, + _target: string, + _functionSig: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; }>; - "owner()"(overrides?: CallOverrides): Promise<{ - 0: string; + "hasSpecialAccessToCall(address,address,bytes4)"( + _caller: string, + _target: string, + _functionSig: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; }>; - pendingOwner(overrides?: CallOverrides): Promise<{ - 0: string; - }>; + setAccessMode( + _target: string, + _accessMode: BigNumberish, + overrides?: Overrides + ): Promise; - "pendingOwner()"(overrides?: CallOverrides): Promise<{ - 0: string; - }>; + "setAccessMode(address,uint8)"( + _target: string, + _accessMode: BigNumberish, + overrides?: Overrides + ): Promise; - setBatchPermissionToCall( - _callers: string[], + setBatchAccessMode( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( - _callers: string[], + "setBatchAccessMode(address[],uint8[])"( _targets: string[], - _functionSigs: 
BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - setBatchPublicAccess( + setBatchPermissionToCall( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - "setBatchPublicAccess(address[],bool[])"( + "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - setPendingOwner( - _newPendingOwner: string, + setDepositLimit( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; - "setPendingOwner(address)"( - _newPendingOwner: string, + "setDepositLimit(address,bool,uint256)"( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; @@ -266,24 +270,8 @@ export class IAllowList extends Contract { _enable: boolean, overrides?: Overrides ): Promise; - - setPublicAccess( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; - - "setPublicAccess(address,bool)"( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; }; - acceptOwner(overrides?: Overrides): Promise; - - "acceptOwner()"(overrides?: Overrides): Promise; - canCall( _caller: string, _target: string, @@ -298,6 +286,33 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; + getAccessMode(_target: string, overrides?: CallOverrides): Promise; + + "getAccessMode(address)"( + _target: string, + overrides?: CallOverrides + ): Promise; + + getTokenDepositLimitData( + _l1Token: string, + overrides?: CallOverrides + ): Promise<{ + depositLimitation: boolean; + depositCap: BigNumber; + 0: boolean; + 1: BigNumber; + }>; + + "getTokenDepositLimitData(address)"( + _l1Token: string, + overrides?: CallOverrides + ): Promise<{ + depositLimitation: boolean; + depositCap: BigNumber; + 0: 
boolean; + 1: BigNumber; + }>; + hasSpecialAccessToCall( _caller: string, _target: string, @@ -312,56 +327,57 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; - isAccessPublic(_target: string, overrides?: CallOverrides): Promise; - - "isAccessPublic(address)"( + setAccessMode( _target: string, - overrides?: CallOverrides - ): Promise; - - owner(overrides?: CallOverrides): Promise; - - "owner()"(overrides?: CallOverrides): Promise; - - pendingOwner(overrides?: CallOverrides): Promise; + _accessMode: BigNumberish, + overrides?: Overrides + ): Promise; - "pendingOwner()"(overrides?: CallOverrides): Promise; + "setAccessMode(address,uint8)"( + _target: string, + _accessMode: BigNumberish, + overrides?: Overrides + ): Promise; - setBatchPermissionToCall( - _callers: string[], + setBatchAccessMode( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( - _callers: string[], + "setBatchAccessMode(address[],uint8[])"( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - setBatchPublicAccess( + setBatchPermissionToCall( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - "setBatchPublicAccess(address[],bool[])"( + "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - setPendingOwner( - _newPendingOwner: string, + setDepositLimit( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; - "setPendingOwner(address)"( - _newPendingOwner: string, + "setDepositLimit(address,bool,uint256)"( + _l1Token: string, + _depositLimitation: boolean, + 
_depositCap: BigNumberish, overrides?: Overrides ): Promise; @@ -381,23 +397,7 @@ export class IAllowList extends Contract { overrides?: Overrides ): Promise; - setPublicAccess( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; - - "setPublicAccess(address,bool)"( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; - callStatic: { - acceptOwner(overrides?: CallOverrides): Promise; - - "acceptOwner()"(overrides?: CallOverrides): Promise; - canCall( _caller: string, _target: string, @@ -412,6 +412,33 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; + getAccessMode(_target: string, overrides?: CallOverrides): Promise; + + "getAccessMode(address)"( + _target: string, + overrides?: CallOverrides + ): Promise; + + getTokenDepositLimitData( + _l1Token: string, + overrides?: CallOverrides + ): Promise<{ + depositLimitation: boolean; + depositCap: BigNumber; + 0: boolean; + 1: BigNumber; + }>; + + "getTokenDepositLimitData(address)"( + _l1Token: string, + overrides?: CallOverrides + ): Promise<{ + depositLimitation: boolean; + depositCap: BigNumber; + 0: boolean; + 1: BigNumber; + }>; + hasSpecialAccessToCall( _caller: string, _target: string, @@ -426,59 +453,57 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; - isAccessPublic( + setAccessMode( _target: string, + _accessMode: BigNumberish, overrides?: CallOverrides - ): Promise; + ): Promise; - "isAccessPublic(address)"( + "setAccessMode(address,uint8)"( _target: string, + _accessMode: BigNumberish, overrides?: CallOverrides - ): Promise; - - owner(overrides?: CallOverrides): Promise; - - "owner()"(overrides?: CallOverrides): Promise; - - pendingOwner(overrides?: CallOverrides): Promise; - - "pendingOwner()"(overrides?: CallOverrides): Promise; + ): Promise; - setBatchPermissionToCall( - _callers: string[], + setBatchAccessMode( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + 
_accessMode: BigNumberish[], overrides?: CallOverrides ): Promise; - "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( - _callers: string[], + "setBatchAccessMode(address[],uint8[])"( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: CallOverrides ): Promise; - setBatchPublicAccess( + setBatchPermissionToCall( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: CallOverrides ): Promise; - "setBatchPublicAccess(address[],bool[])"( + "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: CallOverrides ): Promise; - setPendingOwner( - _newPendingOwner: string, + setDepositLimit( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: CallOverrides ): Promise; - "setPendingOwner(address)"( - _newPendingOwner: string, + "setDepositLimit(address,bool,uint256)"( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: CallOverrides ): Promise; @@ -497,26 +522,13 @@ export class IAllowList extends Contract { _enable: boolean, overrides?: CallOverrides ): Promise; - - setPublicAccess( - _target: string, - _enable: boolean, - overrides?: CallOverrides - ): Promise; - - "setPublicAccess(address,bool)"( - _target: string, - _enable: boolean, - overrides?: CallOverrides - ): Promise; }; filters: { - NewOwner(newOwner: string | null): EventFilter; - - NewPendingOwner( - oldPendingOwner: string | null, - newPendingOwner: string | null + UpdateAccessMode( + target: string | null, + previousMode: null, + newMode: null ): EventFilter; UpdateCallPermission( @@ -525,15 +537,9 @@ export class IAllowList extends Contract { functionSig: BytesLike | null, status: null ): EventFilter; - - UpdatePublicAccess(target: string | null, newStatus: null): EventFilter; }; estimateGas: 
{ - acceptOwner(overrides?: Overrides): Promise; - - "acceptOwner()"(overrides?: Overrides): Promise; - canCall( _caller: string, _target: string, @@ -548,6 +554,26 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; + getAccessMode( + _target: string, + overrides?: CallOverrides + ): Promise; + + "getAccessMode(address)"( + _target: string, + overrides?: CallOverrides + ): Promise; + + getTokenDepositLimitData( + _l1Token: string, + overrides?: CallOverrides + ): Promise; + + "getTokenDepositLimitData(address)"( + _l1Token: string, + overrides?: CallOverrides + ): Promise; + hasSpecialAccessToCall( _caller: string, _target: string, @@ -562,59 +588,57 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; - isAccessPublic( + setAccessMode( _target: string, - overrides?: CallOverrides + _accessMode: BigNumberish, + overrides?: Overrides ): Promise; - "isAccessPublic(address)"( + "setAccessMode(address,uint8)"( _target: string, - overrides?: CallOverrides + _accessMode: BigNumberish, + overrides?: Overrides ): Promise; - owner(overrides?: CallOverrides): Promise; - - "owner()"(overrides?: CallOverrides): Promise; - - pendingOwner(overrides?: CallOverrides): Promise; - - "pendingOwner()"(overrides?: CallOverrides): Promise; - - setBatchPermissionToCall( - _callers: string[], + setBatchAccessMode( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( - _callers: string[], + "setBatchAccessMode(address[],uint8[])"( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - setBatchPublicAccess( + setBatchPermissionToCall( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - 
"setBatchPublicAccess(address[],bool[])"( + "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - setPendingOwner( - _newPendingOwner: string, + setDepositLimit( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; - "setPendingOwner(address)"( - _newPendingOwner: string, + "setDepositLimit(address,bool,uint256)"( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; @@ -633,25 +657,9 @@ export class IAllowList extends Contract { _enable: boolean, overrides?: Overrides ): Promise; - - setPublicAccess( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; - - "setPublicAccess(address,bool)"( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; }; populateTransaction: { - acceptOwner(overrides?: Overrides): Promise; - - "acceptOwner()"(overrides?: Overrides): Promise; - canCall( _caller: string, _target: string, @@ -666,6 +674,26 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; + getAccessMode( + _target: string, + overrides?: CallOverrides + ): Promise; + + "getAccessMode(address)"( + _target: string, + overrides?: CallOverrides + ): Promise; + + getTokenDepositLimitData( + _l1Token: string, + overrides?: CallOverrides + ): Promise; + + "getTokenDepositLimitData(address)"( + _l1Token: string, + overrides?: CallOverrides + ): Promise; + hasSpecialAccessToCall( _caller: string, _target: string, @@ -680,59 +708,57 @@ export class IAllowList extends Contract { overrides?: CallOverrides ): Promise; - isAccessPublic( + setAccessMode( _target: string, - overrides?: CallOverrides + _accessMode: BigNumberish, + overrides?: Overrides ): Promise; - "isAccessPublic(address)"( + "setAccessMode(address,uint8)"( _target: string, - overrides?: 
CallOverrides + _accessMode: BigNumberish, + overrides?: Overrides ): Promise; - owner(overrides?: CallOverrides): Promise; - - "owner()"(overrides?: CallOverrides): Promise; - - pendingOwner(overrides?: CallOverrides): Promise; - - "pendingOwner()"(overrides?: CallOverrides): Promise; - - setBatchPermissionToCall( - _callers: string[], + setBatchAccessMode( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( - _callers: string[], + "setBatchAccessMode(address[],uint8[])"( _targets: string[], - _functionSigs: BytesLike[], - _enables: boolean[], + _accessMode: BigNumberish[], overrides?: Overrides ): Promise; - setBatchPublicAccess( + setBatchPermissionToCall( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - "setBatchPublicAccess(address[],bool[])"( + "setBatchPermissionToCall(address[],address[],bytes4[],bool[])"( + _callers: string[], _targets: string[], + _functionSigs: BytesLike[], _enables: boolean[], overrides?: Overrides ): Promise; - setPendingOwner( - _newPendingOwner: string, + setDepositLimit( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; - "setPendingOwner(address)"( - _newPendingOwner: string, + "setDepositLimit(address,bool,uint256)"( + _l1Token: string, + _depositLimitation: boolean, + _depositCap: BigNumberish, overrides?: Overrides ): Promise; @@ -751,17 +777,5 @@ export class IAllowList extends Contract { _enable: boolean, overrides?: Overrides ): Promise; - - setPublicAccess( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; - - "setPublicAccess(address,bool)"( - _target: string, - _enable: boolean, - overrides?: Overrides - ): Promise; }; } diff --git a/sdk/zksync-web3.js/typechain/IAllowListFactory.ts 
b/sdk/zksync-web3.js/typechain/IAllowListFactory.ts index 67fa419cfa41..37ebcd08ce06 100644 --- a/sdk/zksync-web3.js/typechain/IAllowListFactory.ts +++ b/sdk/zksync-web3.js/typechain/IAllowListFactory.ts @@ -23,30 +23,23 @@ const _abi = [ { indexed: true, internalType: "address", - name: "newOwner", + name: "target", type: "address", }, - ], - name: "NewOwner", - type: "event", - }, - { - anonymous: false, - inputs: [ { - indexed: true, - internalType: "address", - name: "oldPendingOwner", - type: "address", + indexed: false, + internalType: "enum IAllowList.AccessMode", + name: "previousMode", + type: "uint8", }, { - indexed: true, - internalType: "address", - name: "newPendingOwner", - type: "address", + indexed: false, + internalType: "enum IAllowList.AccessMode", + name: "newMode", + type: "uint8", }, ], - name: "NewPendingOwner", + name: "UpdateAccessMode", type: "event", }, { @@ -81,55 +74,79 @@ const _abi = [ type: "event", }, { - anonymous: false, inputs: [ { - indexed: true, internalType: "address", - name: "target", + name: "_caller", type: "address", }, { - indexed: false, + internalType: "address", + name: "_target", + type: "address", + }, + { + internalType: "bytes4", + name: "_functionSig", + type: "bytes4", + }, + ], + name: "canCall", + outputs: [ + { internalType: "bool", - name: "newStatus", + name: "", type: "bool", }, ], - name: "UpdatePublicAccess", - type: "event", - }, - { - inputs: [], - name: "acceptOwner", - outputs: [], - stateMutability: "nonpayable", + stateMutability: "view", type: "function", }, { inputs: [ { internalType: "address", - name: "_caller", + name: "_target", type: "address", }, + ], + name: "getAccessMode", + outputs: [ { - internalType: "address", - name: "_target", - type: "address", + internalType: "enum IAllowList.AccessMode", + name: "", + type: "uint8", }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ { - internalType: "bytes4", - name: "_functionSig", - type: "bytes4", + internalType: 
"address", + name: "_l1Token", + type: "address", }, ], - name: "canCall", + name: "getTokenDepositLimitData", outputs: [ { - internalType: "bool", + components: [ + { + internalType: "bool", + name: "depositLimitation", + type: "bool", + }, + { + internalType: "uint256", + name: "depositCap", + type: "uint256", + }, + ], + internalType: "struct IAllowList.Deposit", name: "", - type: "bool", + type: "tuple", }, ], stateMutability: "view", @@ -171,42 +188,33 @@ const _abi = [ name: "_target", type: "address", }, - ], - name: "isAccessPublic", - outputs: [ { - internalType: "bool", - name: "", - type: "bool", + internalType: "enum IAllowList.AccessMode", + name: "_accessMode", + type: "uint8", }, ], - stateMutability: "view", + name: "setAccessMode", + outputs: [], + stateMutability: "nonpayable", type: "function", }, { - inputs: [], - name: "owner", - outputs: [ + inputs: [ { - internalType: "address", - name: "", - type: "address", + internalType: "address[]", + name: "_targets", + type: "address[]", }, - ], - stateMutability: "view", - type: "function", - }, - { - inputs: [], - name: "pendingOwner", - outputs: [ { - internalType: "address", - name: "", - type: "address", + internalType: "enum IAllowList.AccessMode[]", + name: "_accessMode", + type: "uint8[]", }, ], - stateMutability: "view", + name: "setBatchAccessMode", + outputs: [], + stateMutability: "nonpayable", type: "function", }, { @@ -240,30 +248,22 @@ const _abi = [ { inputs: [ { - internalType: "address[]", - name: "_targets", - type: "address[]", + internalType: "address", + name: "_l1Token", + type: "address", }, { - internalType: "bool[]", - name: "_enables", - type: "bool[]", + internalType: "bool", + name: "_depositLimitation", + type: "bool", }, - ], - name: "setBatchPublicAccess", - outputs: [], - stateMutability: "nonpayable", - type: "function", - }, - { - inputs: [ { - internalType: "address", - name: "_newPendingOwner", - type: "address", + internalType: "uint256", + name: "_depositCap", + 
type: "uint256", }, ], - name: "setPendingOwner", + name: "setDepositLimit", outputs: [], stateMutability: "nonpayable", type: "function", @@ -296,22 +296,4 @@ const _abi = [ stateMutability: "nonpayable", type: "function", }, - { - inputs: [ - { - internalType: "address", - name: "_target", - type: "address", - }, - { - internalType: "bool", - name: "_enable", - type: "bool", - }, - ], - name: "setPublicAccess", - outputs: [], - stateMutability: "nonpayable", - type: "function", - }, ]; diff --git a/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts b/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts index 778df3998e34..e770ca81975f 100644 --- a/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts +++ b/sdk/zksync-web3.js/typechain/IL1Bridge.d.ts @@ -534,4 +534,4 @@ export class IL1Bridge extends Contract { overrides?: CallOverrides ): Promise; }; -} +} \ No newline at end of file diff --git a/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts b/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts index 4b6b8dd20ceb..425118662f2d 100644 --- a/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts +++ b/sdk/zksync-web3.js/typechain/IL1BridgeFactory.ts @@ -256,4 +256,4 @@ const _abi = [ stateMutability: "view", type: "function", }, -]; +]; \ No newline at end of file diff --git a/sdk/zksync-web3.js/typechain/IL2Bridge.d.ts b/sdk/zksync-web3.js/typechain/IL2Bridge.d.ts index e7742f25c5f6..7e3e33ea2e4a 100644 --- a/sdk/zksync-web3.js/typechain/IL2Bridge.d.ts +++ b/sdk/zksync-web3.js/typechain/IL2Bridge.d.ts @@ -23,6 +23,7 @@ import { FunctionFragment, EventFragment, Result } from "@ethersproject/abi"; interface IL2BridgeInterface extends ethers.utils.Interface { functions: { "finalizeDeposit(address,address,address,uint256,bytes)": FunctionFragment; + "initialize(address,bytes32,address)": FunctionFragment; "l1Bridge()": FunctionFragment; "l1TokenAddress(address)": FunctionFragment; "l2TokenAddress(address)": FunctionFragment; @@ -33,6 +34,10 @@ interface IL2BridgeInterface extends 
ethers.utils.Interface { functionFragment: "finalizeDeposit", values: [string, string, string, BigNumberish, BytesLike] ): string; + encodeFunctionData( + functionFragment: "initialize", + values: [string, BytesLike, string] + ): string; encodeFunctionData(functionFragment: "l1Bridge", values?: undefined): string; encodeFunctionData( functionFragment: "l1TokenAddress", @@ -51,6 +56,7 @@ interface IL2BridgeInterface extends ethers.utils.Interface { functionFragment: "finalizeDeposit", data: BytesLike ): Result; + decodeFunctionResult(functionFragment: "initialize", data: BytesLike): Result; decodeFunctionResult(functionFragment: "l1Bridge", data: BytesLike): Result; decodeFunctionResult( functionFragment: "l1TokenAddress", @@ -97,6 +103,20 @@ export class IL2Bridge extends Contract { overrides?: Overrides ): Promise; + initialize( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + + "initialize(address,bytes32,address)"( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + l1Bridge(overrides?: CallOverrides): Promise<{ 0: string; }>; @@ -166,6 +186,20 @@ export class IL2Bridge extends Contract { overrides?: Overrides ): Promise; + initialize( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + + "initialize(address,bytes32,address)"( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + l1Bridge(overrides?: CallOverrides): Promise; "l1Bridge()"(overrides?: CallOverrides): Promise; @@ -217,6 +251,20 @@ export class IL2Bridge extends Contract { overrides?: CallOverrides ): Promise; + initialize( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: CallOverrides + ): Promise; + + "initialize(address,bytes32,address)"( + _l1Bridge: string, + 
_l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: CallOverrides + ): Promise; + l1Bridge(overrides?: CallOverrides): Promise; "l1Bridge()"(overrides?: CallOverrides): Promise; @@ -277,6 +325,20 @@ export class IL2Bridge extends Contract { overrides?: Overrides ): Promise; + initialize( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + + "initialize(address,bytes32,address)"( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + l1Bridge(overrides?: CallOverrides): Promise; "l1Bridge()"(overrides?: CallOverrides): Promise; @@ -335,6 +397,20 @@ export class IL2Bridge extends Contract { overrides?: Overrides ): Promise; + initialize( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + + "initialize(address,bytes32,address)"( + _l1Bridge: string, + _l2TokenProxyBytecodeHash: BytesLike, + _governor: string, + overrides?: Overrides + ): Promise; + l1Bridge(overrides?: CallOverrides): Promise; "l1Bridge()"(overrides?: CallOverrides): Promise; diff --git a/sdk/zksync-web3.js/typechain/IL2BridgeFactory.ts b/sdk/zksync-web3.js/typechain/IL2BridgeFactory.ts index 94ffa3561842..5de8477077d4 100644 --- a/sdk/zksync-web3.js/typechain/IL2BridgeFactory.ts +++ b/sdk/zksync-web3.js/typechain/IL2BridgeFactory.ts @@ -50,6 +50,29 @@ const _abi = [ stateMutability: "nonpayable", type: "function", }, + { + inputs: [ + { + internalType: "address", + name: "_l1Bridge", + type: "address", + }, + { + internalType: "bytes32", + name: "_l2TokenProxyBytecodeHash", + type: "bytes32", + }, + { + internalType: "address", + name: "_governor", + type: "address", + }, + ], + name: "initialize", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, { inputs: [], name: "l1Bridge", diff --git a/sdk/zksync-web3.js/typechain/IZkSync.d.ts 
b/sdk/zksync-web3.js/typechain/IZkSync.d.ts index 803c97f47e71..34d2062cb957 100644 --- a/sdk/zksync-web3.js/typechain/IZkSync.d.ts +++ b/sdk/zksync-web3.js/typechain/IZkSync.d.ts @@ -41,7 +41,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { "getL2DefaultAccountBytecodeHash()": FunctionFragment; "getPendingGovernor()": FunctionFragment; "getPriorityQueueSize()": FunctionFragment; - "getPriorityTxMaxGasLimit()": FunctionFragment; "getProposedUpgradeHash()": FunctionFragment; "getProposedUpgradeTimestamp()": FunctionFragment; "getSecurityCouncil()": FunctionFragment; @@ -52,6 +51,7 @@ interface IZkSyncInterface extends ethers.utils.Interface { "getUpgradeProposalState()": FunctionFragment; "getVerifier()": FunctionFragment; "getVerifierParams()": FunctionFragment; + "getpriorityTxMaxGasLimit()": FunctionFragment; "isApprovedBySecurityCouncil()": FunctionFragment; "isDiamondStorageFrozen()": FunctionFragment; "isEthWithdrawalFinalized(uint256,uint256)": FunctionFragment; @@ -70,7 +70,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { "requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)": FunctionFragment; "revertBlocks(uint256)": FunctionFragment; "securityCouncilUpgradeApprove(bytes32)": FunctionFragment; - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)": FunctionFragment; "setL2BootloaderBytecodeHash(bytes32)": FunctionFragment; "setL2DefaultAccountBytecodeHash(bytes32)": FunctionFragment; "setPendingGovernor(address)": FunctionFragment; @@ -201,10 +200,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { functionFragment: "getPriorityQueueSize", values?: undefined ): string; - encodeFunctionData( - functionFragment: "getPriorityTxMaxGasLimit", - values?: undefined - ): string; encodeFunctionData( functionFragment: "getProposedUpgradeHash", values?: undefined @@ -245,6 +240,10 @@ interface IZkSyncInterface extends ethers.utils.Interface { 
functionFragment: "getVerifierParams", values?: undefined ): string; + encodeFunctionData( + functionFragment: "getpriorityTxMaxGasLimit", + values?: undefined + ): string; encodeFunctionData( functionFragment: "isApprovedBySecurityCouncil", values?: undefined @@ -383,21 +382,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { functionFragment: "securityCouncilUpgradeApprove", values: [BytesLike] ): string; - encodeFunctionData( - functionFragment: "serializeL2Transaction", - values: [ - BigNumberish, - BigNumberish, - string, - string, - BytesLike, - BigNumberish, - BigNumberish, - BytesLike[], - BigNumberish, - string - ] - ): string; encodeFunctionData( functionFragment: "setL2BootloaderBytecodeHash", values: [BytesLike] @@ -528,10 +512,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { functionFragment: "getPriorityQueueSize", data: BytesLike ): Result; - decodeFunctionResult( - functionFragment: "getPriorityTxMaxGasLimit", - data: BytesLike - ): Result; decodeFunctionResult( functionFragment: "getProposedUpgradeHash", data: BytesLike @@ -572,6 +552,10 @@ interface IZkSyncInterface extends ethers.utils.Interface { functionFragment: "getVerifierParams", data: BytesLike ): Result; + decodeFunctionResult( + functionFragment: "getpriorityTxMaxGasLimit", + data: BytesLike + ): Result; decodeFunctionResult( functionFragment: "isApprovedBySecurityCouncil", data: BytesLike @@ -644,10 +628,6 @@ interface IZkSyncInterface extends ethers.utils.Interface { functionFragment: "securityCouncilUpgradeApprove", data: BytesLike ): Result; - decodeFunctionResult( - functionFragment: "serializeL2Transaction", - data: BytesLike - ): Result; decodeFunctionResult( functionFragment: "setL2BootloaderBytecodeHash", data: BytesLike @@ -1018,14 +998,6 @@ export class IZkSync extends Contract { 0: BigNumber; }>; - getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise<{ - 0: BigNumber; - }>; - - "getPriorityTxMaxGasLimit()"(overrides?: CallOverrides): 
Promise<{ - 0: BigNumber; - }>; - getProposedUpgradeHash(overrides?: CallOverrides): Promise<{ 0: string; }>; @@ -1120,6 +1092,14 @@ export class IZkSync extends Contract { }; }>; + getpriorityTxMaxGasLimit(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + "getpriorityTxMaxGasLimit()"(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise<{ 0: boolean; }>; @@ -1214,8 +1194,8 @@ export class IZkSync extends Contract { l2TransactionBaseCost( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise<{ 0: BigNumber; @@ -1223,8 +1203,8 @@ export class IZkSync extends Contract { "l2TransactionBaseCost(uint256,uint256,uint256)"( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise<{ 0: BigNumber; @@ -1440,8 +1420,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -1451,8 +1431,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -1478,104 +1458,6 @@ export class IZkSync extends Contract { overrides?: Overrides ): Promise; - serializeL2Transaction( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: 
string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - 0: { - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: [BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }; - }>; - - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)"( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - 0: { - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: [BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, 
BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }; - }>; - setL2BootloaderBytecodeHash( _l2BootloaderBytecodeHash: BytesLike, overrides?: Overrides @@ -1935,10 +1817,6 @@ export class IZkSync extends Contract { "getPriorityQueueSize()"(overrides?: CallOverrides): Promise; - getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; - - "getPriorityTxMaxGasLimit()"(overrides?: CallOverrides): Promise; - getProposedUpgradeHash(overrides?: CallOverrides): Promise; "getProposedUpgradeHash()"(overrides?: CallOverrides): Promise; @@ -1999,6 +1877,10 @@ export class IZkSync extends Contract { 2: string; }>; + getpriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + "getpriorityTxMaxGasLimit()"(overrides?: CallOverrides): Promise; + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; "isApprovedBySecurityCouncil()"(overrides?: CallOverrides): Promise; @@ -2055,15 +1937,15 @@ export class IZkSync extends Contract { l2TransactionBaseCost( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; "l2TransactionBaseCost(uint256,uint256,uint256)"( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; @@ -2265,8 +2147,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -2276,8 +2158,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: 
BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -2303,100 +2185,6 @@ export class IZkSync extends Contract { overrides?: Overrides ): Promise; - serializeL2Transaction( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: [BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }>; - - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)"( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: 
[BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }>; - setL2BootloaderBytecodeHash( _l2BootloaderBytecodeHash: BytesLike, overrides?: Overrides @@ -2750,12 +2538,6 @@ export class IZkSync extends Contract { "getPriorityQueueSize()"(overrides?: CallOverrides): Promise; - getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; - - "getPriorityTxMaxGasLimit()"( - overrides?: CallOverrides - ): Promise; - getProposedUpgradeHash(overrides?: CallOverrides): Promise; "getProposedUpgradeHash()"(overrides?: CallOverrides): Promise; @@ -2816,6 +2598,10 @@ export class IZkSync extends Contract { 2: string; }>; + getpriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + "getpriorityTxMaxGasLimit()"(overrides?: CallOverrides): Promise; + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; "isApprovedBySecurityCouncil()"( @@ -2877,15 +2663,15 @@ export class IZkSync extends Contract { l2TransactionBaseCost( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; "l2TransactionBaseCost(uint256,uint256,uint256)"( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; @@ -3087,8 +2873,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + 
_l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: CallOverrides @@ -3098,8 +2884,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: CallOverrides @@ -3125,100 +2911,6 @@ export class IZkSync extends Contract { overrides?: CallOverrides ): Promise; - serializeL2Transaction( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: [BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }>; - - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)"( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: 
BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise<{ - txType: BigNumber; - from: BigNumber; - to: BigNumber; - gasLimit: BigNumber; - gasPerPubdataByteLimit: BigNumber; - maxFeePerGas: BigNumber; - maxPriorityFeePerGas: BigNumber; - paymaster: BigNumber; - nonce: BigNumber; - value: BigNumber; - reserved: [BigNumber, BigNumber, BigNumber, BigNumber]; - data: string; - signature: string; - factoryDeps: BigNumber[]; - paymasterInput: string; - reservedDynamic: string; - 0: BigNumber; - 1: BigNumber; - 2: BigNumber; - 3: BigNumber; - 4: BigNumber; - 5: BigNumber; - 6: BigNumber; - 7: BigNumber; - 8: BigNumber; - 9: BigNumber; - 10: [BigNumber, BigNumber, BigNumber, BigNumber]; - 11: string; - 12: string; - 13: BigNumber[]; - 14: string; - 15: string; - }>; - setL2BootloaderBytecodeHash( _l2BootloaderBytecodeHash: BytesLike, overrides?: CallOverrides @@ -3678,12 +3370,6 @@ export class IZkSync extends Contract { "getPriorityQueueSize()"(overrides?: CallOverrides): Promise; - getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; - - "getPriorityTxMaxGasLimit()"( - overrides?: CallOverrides - ): Promise; - getProposedUpgradeHash(overrides?: CallOverrides): Promise; "getProposedUpgradeHash()"(overrides?: CallOverrides): Promise; @@ -3726,6 +3412,10 @@ export class IZkSync extends Contract { "getVerifierParams()"(overrides?: CallOverrides): Promise; + getpriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + "getpriorityTxMaxGasLimit()"(overrides?: CallOverrides): Promise; + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; "isApprovedBySecurityCouncil()"( @@ -3790,15 +3480,15 @@ export class IZkSync extends Contract { l2TransactionBaseCost( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; "l2TransactionBaseCost(uint256,uint256,uint256)"( _gasPrice: 
BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; @@ -3984,8 +3674,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -3995,8 +3685,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -4022,34 +3712,6 @@ export class IZkSync extends Contract { overrides?: Overrides ): Promise; - serializeL2Transaction( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise; - - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)"( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise; - setL2BootloaderBytecodeHash( _l2BootloaderBytecodeHash: BytesLike, overrides?: Overrides @@ -4420,14 +4082,6 @@ export class IZkSync extends Contract { overrides?: CallOverrides ): Promise; - getPriorityTxMaxGasLimit( - overrides?: 
CallOverrides - ): Promise; - - "getPriorityTxMaxGasLimit()"( - overrides?: CallOverrides - ): Promise; - getProposedUpgradeHash( overrides?: CallOverrides ): Promise; @@ -4502,6 +4156,14 @@ export class IZkSync extends Contract { overrides?: CallOverrides ): Promise; + getpriorityTxMaxGasLimit( + overrides?: CallOverrides + ): Promise; + + "getpriorityTxMaxGasLimit()"( + overrides?: CallOverrides + ): Promise; + isApprovedBySecurityCouncil( overrides?: CallOverrides ): Promise; @@ -4572,15 +4234,15 @@ export class IZkSync extends Contract { l2TransactionBaseCost( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; "l2TransactionBaseCost(uint256,uint256,uint256)"( _gasPrice: BigNumberish, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, overrides?: CallOverrides ): Promise; @@ -4768,8 +4430,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -4779,8 +4441,8 @@ export class IZkSync extends Contract { _contractL2: string, _l2Value: BigNumberish, _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, _factoryDeps: BytesLike[], _refundRecipient: string, overrides?: PayableOverrides @@ -4806,34 +4468,6 @@ export class IZkSync extends Contract { overrides?: Overrides ): Promise; - serializeL2Transaction( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: 
BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise; - - "serializeL2Transaction(uint256,uint256,address,address,bytes,uint256,uint256,bytes[],uint256,address)"( - _txId: BigNumberish, - _l2Value: BigNumberish, - _sender: string, - _contractAddressL2: string, - _calldata: BytesLike, - _gasLimit: BigNumberish, - _gasPerPubdataByteLimit: BigNumberish, - _factoryDeps: BytesLike[], - _toMint: BigNumberish, - _refundRecipient: string, - overrides?: CallOverrides - ): Promise; - setL2BootloaderBytecodeHash( _l2BootloaderBytecodeHash: BytesLike, overrides?: Overrides diff --git a/sdk/zksync-web3.js/typechain/IZkSyncFactory.ts b/sdk/zksync-web3.js/typechain/IZkSyncFactory.ts index 3b217fb807b5..7fdf22ef11e0 100644 --- a/sdk/zksync-web3.js/typechain/IZkSyncFactory.ts +++ b/sdk/zksync-web3.js/typechain/IZkSyncFactory.ts @@ -1067,19 +1067,6 @@ const _abi = [ stateMutability: "view", type: "function", }, - { - inputs: [], - name: "getPriorityTxMaxGasLimit", - outputs: [ - { - internalType: "uint256", - name: "", - type: "uint256", - }, - ], - stateMutability: "view", - type: "function", - }, { inputs: [], name: "getProposedUpgradeHash", @@ -1227,6 +1214,19 @@ const _abi = [ stateMutability: "view", type: "function", }, + { + inputs: [], + name: "getpriorityTxMaxGasLimit", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "view", + type: "function", + }, { inputs: [], name: "isApprovedBySecurityCouncil", @@ -1362,12 +1362,12 @@ const _abi = [ }, { internalType: "uint256", - name: "_gasLimit", + name: "_l2GasLimit", type: "uint256", }, { internalType: "uint256", - name: "_gasPerPubdataByteLimit", + name: "_l2GasPerPubdataByteLimit", type: "uint256", }, ], @@ -1786,12 +1786,12 @@ const _abi = [ }, { internalType: "uint256", - name: "_gasLimit", + name: "_l2GasLimit", type: "uint256", }, { internalType: 
"uint256", - name: "_gasPerPubdataByteLimit", + name: "_l2GasPerPubdataByteLimit", type: "uint256", }, { @@ -1842,152 +1842,6 @@ const _abi = [ stateMutability: "nonpayable", type: "function", }, - { - inputs: [ - { - internalType: "uint256", - name: "_txId", - type: "uint256", - }, - { - internalType: "uint256", - name: "_l2Value", - type: "uint256", - }, - { - internalType: "address", - name: "_sender", - type: "address", - }, - { - internalType: "address", - name: "_contractAddressL2", - type: "address", - }, - { - internalType: "bytes", - name: "_calldata", - type: "bytes", - }, - { - internalType: "uint256", - name: "_gasLimit", - type: "uint256", - }, - { - internalType: "uint256", - name: "_gasPerPubdataByteLimit", - type: "uint256", - }, - { - internalType: "bytes[]", - name: "_factoryDeps", - type: "bytes[]", - }, - { - internalType: "uint256", - name: "_toMint", - type: "uint256", - }, - { - internalType: "address", - name: "_refundRecipient", - type: "address", - }, - ], - name: "serializeL2Transaction", - outputs: [ - { - components: [ - { - internalType: "uint256", - name: "txType", - type: "uint256", - }, - { - internalType: "uint256", - name: "from", - type: "uint256", - }, - { - internalType: "uint256", - name: "to", - type: "uint256", - }, - { - internalType: "uint256", - name: "gasLimit", - type: "uint256", - }, - { - internalType: "uint256", - name: "gasPerPubdataByteLimit", - type: "uint256", - }, - { - internalType: "uint256", - name: "maxFeePerGas", - type: "uint256", - }, - { - internalType: "uint256", - name: "maxPriorityFeePerGas", - type: "uint256", - }, - { - internalType: "uint256", - name: "paymaster", - type: "uint256", - }, - { - internalType: "uint256", - name: "nonce", - type: "uint256", - }, - { - internalType: "uint256", - name: "value", - type: "uint256", - }, - { - internalType: "uint256[4]", - name: "reserved", - type: "uint256[4]", - }, - { - internalType: "bytes", - name: "data", - type: "bytes", - }, - { - internalType: 
"bytes", - name: "signature", - type: "bytes", - }, - { - internalType: "uint256[]", - name: "factoryDeps", - type: "uint256[]", - }, - { - internalType: "bytes", - name: "paymasterInput", - type: "bytes", - }, - { - internalType: "bytes", - name: "reservedDynamic", - type: "bytes", - }, - ], - internalType: "struct IMailbox.L2CanonicalTransaction", - name: "", - type: "tuple", - }, - ], - stateMutability: "pure", - type: "function", - }, { inputs: [ { diff --git a/sdk/zksync-web3.js/typechain/update.sh b/sdk/zksync-web3.js/typechain/update.sh old mode 100644 new mode 100755