diff --git a/.dockerignore b/.dockerignore
index 271d170e7d01..6b89f6c2c841 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,6 +2,7 @@
 !docker/prover/prover-entry.sh
 !docker/zk/entrypoint.sh
 !docker/local-node/entrypoint.sh
+!docker/external-node/entrypoint.sh
 !docker/contract-verifier/install-all-solc.sh
 !etc/test_config
 !etc/env/dev.env.example
@@ -16,11 +17,13 @@ keys/setup
 !db/
 !backups/
 !core/
+!prover/
 !yarn.lock
 !package.json
 !Cargo.lock
 !Cargo.toml
 !contracts/
+!setup_2\^26.key
 # It's required to remove .git from contracts,
 # otherwise yarn tries to use .git parent directory that
 # doesn't exist.
@@ -30,7 +33,11 @@ contracts/.git
 !sdk/zksync-rs
 !sdk/zksync-web3.js
 !etc/system-contracts/bootloader/build/artifacts
+!etc/system-contracts/contracts/artifacts
+!etc/system-contracts/contracts/precompiles/artifacts
 !etc/system-contracts/artifacts-zk
+!etc/multivm_bootloaders
 !cargo
 !bellman-cuda
 !core/bin/verification_key_generator_and_server/data/
+!prover/vk_setup_data_generator_server_fri/data/
diff --git a/prover/setup_key_generator_and_server/data/.gitkeep b/.env
similarity index 100%
rename from prover/setup_key_generator_and_server/data/.gitkeep
rename to .env
diff --git a/.gitignore b/.gitignore
index 8e2006d87520..8d3d81418b78 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ a.out
 cobertura.xml
 tags
 *.orig
+.direnv

 # Yarn files
 .yarn/
@@ -26,11 +27,9 @@ todo
 Cargo.lock
 !/Cargo.lock
-!/core/bin/prover/Cargo.lock
-!/core/bin/circuit_synthesizer/Cargo.lock
-!/core/bin/setup_key_generator_and_server/Cargo.lock
 !/core/bin/verification_key_generator_and_server/Cargo.lock
 !/infrastructure/zksync-crypto/Cargo.lock
+!/prover/Cargo.lock

 /etc/env/*
 !/etc/env/base
@@ -39,7 +38,9 @@ Cargo.lock
 !/etc/env/ext-node-docker.toml
 /etc/tokens/localhost.json
 /etc/zksolc-bin/*
+/etc/zkvyper-bin/*
 /etc/solc-bin/*
+/etc/vyper-bin/*
 !/keys
 /keys/*
 !/keys/packed
@@ -69,5 +70,3 @@ artifacts-zk/
 cache-zk/
 zksolc
 verified_sources
-
-.github
diff --git a/.gitmodules b/.gitmodules
index 2cc82ba95cb4..47c6f801432c 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
 [submodule "etc/system-contracts"]
- path = etc/system-contracts
- url = https://github.com/matter-labs/era-system-contracts.git
+path = etc/system-contracts
+url = https://github.com/matter-labs/era-system-contracts.git
 [submodule "contracts"]
- path = contracts
- url = https://github.com/matter-labs/era-contracts.git
+path = contracts
+url = https://github.com/matter-labs/era-contracts.git
diff --git a/.nvmrc b/.nvmrc
new file mode 100644
index 000000000000..6aab9b43fa34
--- /dev/null
+++ b/.nvmrc
@@ -0,0 +1 @@
+v18.18.0
diff --git a/.prettierignore b/.prettierignore
index 05ba7370295a..fd0d65050de8 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,2 +1,5 @@
 # Ignore submodule
 bellman-cuda
+sdk/zksync-web3.js/CHANGELOG.md
+sdk/zksync-rs/CHANGELOG.md
+CHANGELOG.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f129e606f7a5..dd3d4584296f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,9 +1,44 @@
 # Contribution Guidelines

-Thank you for considering helping out with the source code! We are extremely grateful for any consideration of
-contributions to this repository. However, at this time, we generally do not accept external contributions. This policy
-will change in the future, so please check back regularly for updates.
+Hello! Thanks for your interest in joining the mission to accelerate the mass adoption of crypto for personal
+sovereignty! We welcome contributions from anyone on the internet, and are grateful for even the smallest of fixes!

-For security issues, please contact us at [security@matterlabs.dev](mailto:security@matterlabs.dev).
+## Ways to contribute

-Thank you for your support in accelerating the mass adoption of crypto for personal sovereignty!
+There are many ways to contribute to the ZK Stack:
+
+1. Open issues: if you find a bug, have something you believe needs to be fixed, or have an idea for a feature, please
+   open an issue.
+2. Add color to existing issues: provide screenshots, code snippets, and whatever you think would be helpful to resolve
+   issues.
+3. Resolve issues: either by showing that an issue isn't a problem and the current state is fine as is, or by fixing
+   the problem and opening a PR.
+4. Report security issues; see [our security policy](.github/SECURITY.md).
+5. [Join the team!](https://matterlabs.notion.site/Shape-the-future-of-Ethereum-at-Matter-Labs-dfb3b5a037044bb3a8006af2eb0575e0)
+
+## Fixing issues
+
+To contribute code that fixes issues, please fork the repo, fix the issue, commit, and add documentation as per the PR
+template; the repo's maintainers will review the PR. See
+[here](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)
+for guidance on how to work with PRs created from a fork.
+
+## Licenses
+
+If you contribute to this project, your contributions will be made to the project under both the Apache 2.0 and the MIT
+licenses.
+
+## Resources
+
+We aim to make it as easy as possible to contribute to the mission. This is still WIP, and we're happy to receive
+contributions and suggestions here too. Some resources to help:
+
+1. [In-repo docs aimed at developers](docs)
+2. [zkSync Era docs!](https://era.zksync.io/docs/)
+3. Company links can be found in the [repo's readme](README.md)
+
+## Code of Conduct
+
+Be polite and respectful.
+
+### Thank you
diff --git a/Cargo.lock b/Cargo.lock
index cefaa683877c..6afde668dc0b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,12 +2,6 @@
 # It is not intended for manual editing.
version = 3 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" - [[package]] name = "actix-codec" version = "0.5.1" @@ -85,7 +79,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6" dependencies = [ - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -201,7 +195,7 @@ checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9" dependencies = [ "actix-router", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -315,12 +309,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "aliasable" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" - [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -438,7 +426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -518,7 +506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -529,7 +517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -740,7 +728,7 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "regex", "rustc-hash", "shlex", @@ -1120,7 +1108,7 @@ dependencies = [ [[package]] name = "circuit_testing" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#abd44b507840f836da6e084aaacb2ba8a7cb1df6" +source = "git+https://github.com/matter-labs/era-circuit_testing.git?branch=main#ddc4684b2e81a1d37c36d20585e445439aa2751c" dependencies = [ "bellman_ce", ] @@ -1195,7 +1183,7 @@ checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -1407,7 +1395,7 @@ dependencies = [ [[package]] name = "cross_external_nodes_checker" -version = "1.0.0" +version = "0.1.0" dependencies = [ "anyhow", "ctrlc", @@ -1416,6 +1404,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "tracing", "vlog", "zksync_types", "zksync_utils", @@ -1626,7 +1615,7 @@ source = "git+https://github.com/matter-labs/era-sync_vm.git?tag=v1.3.3-rc0#23be dependencies = [ "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "serde", "syn 1.0.109", ] @@ -1669,7 +1658,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "strsim 0.10.0", "syn 1.0.109", ] @@ -1681,7 +1670,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -1703,7 +1692,7 @@ name = "db_test_macro" version = "0.1.0" dependencies = [ "proc-macro2 1.0.66", - "quote 
1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -1745,7 +1734,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -1757,7 +1746,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "rustc_version", "syn 1.0.109", ] @@ -1817,6 +1806,12 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1858,6 +1853,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "elsa" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714f766f3556b44e7e4776ad133fcc3445a489517c25c704ace411bb14790194" +dependencies = [ + "stable_deref_trait", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -1873,16 +1877,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "enumeration_indices_migration" -version = "0.1.0" -dependencies = [ - "tokio", - "vlog", - "zksync_core", - "zksync_dal", -] - [[package]] name = "env_logger" version = "0.9.3" @@ -2060,7 +2054,7 @@ dependencies = [ "num-integer", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "serde", "syn 1.0.109", ] @@ -2101,12 +2095,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "flate2" version = "1.0.26" @@ -2284,7 +2272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -2594,8 +2582,14 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ - "ahash 0.7.6", + "ahash 0.8.3", ] [[package]] @@ -2823,18 +2817,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - [[package]] name = "hyper-tls" version = "0.5.0" @@ -2945,7 +2927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -3128,7 +3110,7 @@ source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e2 dependencies = [ "proc-macro-crate 0.1.5", 
"proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -3287,7 +3269,7 @@ dependencies = [ "heck 0.4.1", "proc-macro-crate 1.3.1", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -3473,6 +3455,26 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linkme" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f948366ad5bb46b5514ba7a7a80643726eef08b06632592699676748c8bc33b" +dependencies = [ + "linkme-impl", +] + +[[package]] +name = "linkme-impl" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc28438cad73dcc90ff3466fc329a9252b1b8ba668eb0d5668ba97088cf4eef0" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.33", + "syn 2.0.27", +] + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -3500,9 +3502,11 @@ dependencies = [ "static_assertions", "thiserror", "tokio", + "tracing", "vlog", "zksync", "zksync_config", + "zksync_contracts", "zksync_eth_client", "zksync_eth_signer", "zksync_types", @@ -3553,6 +3557,15 @@ dependencies = [ "libc", ] +[[package]] +name = "mach2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" +dependencies = [ + "libc", +] + [[package]] name = "match_cfg" version = "0.1.0" @@ -3615,29 +3628,28 @@ dependencies = [ [[package]] name = "metrics" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ - "ahash 0.7.6", + "ahash 0.8.3", "metrics-macros", - "portable-atomic 0.3.20", + "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70" +checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ + "base64 0.21.2", "hyper", "indexmap", "ipnet", "metrics", "metrics-util", - "parking_lot 0.12.1", - "portable-atomic 0.3.20", - "quanta 0.10.1", + "quanta 0.11.1", "thiserror", "tokio", "tracing", @@ -3645,29 +3657,27 @@ dependencies = [ [[package]] name = "metrics-macros" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" +checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", - "syn 1.0.109", + "quote 1.0.33", + "syn 2.0.27", ] [[package]] name = "metrics-util" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" +checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ "crossbeam-epoch 0.9.15", "crossbeam-utils 0.8.16", - "hashbrown 0.12.3", + "hashbrown 0.13.1", "metrics", "num_cpus", - "parking_lot 0.12.1", - "portable-atomic 0.3.20", - "quanta 0.10.1", + "quanta 0.11.1", "sketches-ddsketch", ] @@ -3781,18 +3791,13 @@ dependencies = [ "ws2_32-sys", ] 
-[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - [[package]] name = "multivm" version = "0.1.0" dependencies = [ "vlog", "vm", + "vm_1_3_2", "vm_m5", "vm_m6", "zksync_contracts", @@ -3986,7 +3991,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -4073,7 +4078,7 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -4132,7 +4137,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -4154,68 +4159,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel 0.5.8", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", -] - -[[package]] -name = "opentelemetry-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449048140ee61e28f57abe6e9975eedc1f3a29855c7407bd6c12b18578863379" -dependencies = [ - "async-trait", - "bytes 1.4.0", - "http", - "opentelemetry", - "reqwest", -] - -[[package]] -name = "opentelemetry-otlp" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" -dependencies = [ - "async-trait", - "futures 0.3.28", - "futures-util", - "http", - "opentelemetry", - "opentelemetry-http", - "prost", - "prost-build", - "reqwest", - "thiserror", - "tokio", - "tonic", - "tonic-build", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "985cc35d832d412224b2cffe2f9194b1b89b6aa5d0bef76d080dce09d90e62bd" -dependencies = [ - "opentelemetry", -] - [[package]] name = "os_info" version = "3.7.0" @@ -4233,29 +4176,6 @@ version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" -[[package]] -name = "ouroboros" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1358bd1558bd2a083fed428ffeda486fbfb323e698cdda7794259d592ca72db" -dependencies = [ - "aliasable", - "ouroboros_macro", -] - -[[package]] -name = "ouroboros_macro" -version = "0.15.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" -dependencies = [ - "Inflector", - "proc-macro-error", - "proc-macro2 1.0.66", - "quote 1.0.28", - "syn 1.0.109", -] - [[package]] name = "overload" version = "0.1.1" @@ -4335,7 +4255,7 @@ checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate 1.3.1", 
"proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -4347,7 +4267,7 @@ checksum = "2b6937b5e67bfba3351b87b040d48352a2fcb6ad72f81855412ce97b45c8f110" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -4514,7 +4434,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -4529,16 +4449,6 @@ dependencies = [ "sha2 0.10.6", ] -[[package]] -name = "petgraph" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" -dependencies = [ - "fixedbitset", - "indexmap", -] - [[package]] name = "pin-project" version = "1.1.0" @@ -4555,7 +4465,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -4637,15 +4547,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.3", -] - [[package]] name = "portable-atomic" version = "1.3.3" @@ -4721,7 +4622,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", "version_check", ] @@ -4733,7 +4634,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "version_check", ] @@ -4762,66 +4663,36 @@ dependencies = [ ] [[package]] -name = "prometheus_exporter" -version = "1.0.0" -dependencies = [ - "metrics", - "metrics-exporter-prometheus", - "tokio", - "vlog", -] - -[[package]] -name = "prost" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" -dependencies = [ - "bytes 1.4.0", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.9.0" +name = "prometheus-client" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ - "bytes 1.4.0", - "heck 0.3.3", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "regex", - "tempfile", - "which", + "dtoa", + "itoa", + "parking_lot 0.12.1", + "prometheus-client-derive-encode", ] [[package]] -name = "prost-derive" -version = "0.9.0" +name = "prometheus-client-derive-encode" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "anyhow", - "itertools", "proc-macro2 1.0.66", - "quote 1.0.28", - "syn 1.0.109", + "quote 1.0.33", + "syn 2.0.27", ] [[package]] -name = "prost-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +name = "prometheus_exporter" +version = "0.1.0" dependencies = [ - "bytes 1.4.0", - "prost", + "anyhow", + "metrics", + "tokio", + "vise-exporter", ] [[package]] @@ -4853,16 +4724,16 @@ dependencies = [ [[package]] name = "quanta" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils 0.8.16", "libc", - "mach", + "mach2", "once_cell", "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "web-sys", "winapi 0.3.9", ] @@ -4878,9 +4749,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.28" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2 1.0.66", ] @@ -5735,7 +5606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -5792,7 +5663,7 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -6121,7 +5992,7 @@ dependencies = [ "hex", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "serde", "serde_json", "sha2 0.10.6", @@ -6143,12 +6014,28 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "storage_logs_dedup_migration" +version = "0.1.0" +dependencies = [ + "clap 4.3.4", + "tokio", + "zksync_dal", + "zksync_types", +] + [[package]] name = "stringprep" version = "0.1.2" @@ -6191,7 +6078,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -6212,7 +6099,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "rustversion", "syn 1.0.109", ] @@ -6241,7 +6128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "unicode-ident", ] @@ -6252,7 +6139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "unicode-ident", ] @@ -6278,7 +6165,7 @@ dependencies = [ "sha2 0.10.6", "sha3 0.10.6", "smallvec", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc0)", "zkevm_opcode_defs 1.3.2", ] @@ -6288,21 +6175,6 @@ 
version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "system-constants-generator" -version = "0.1.0" -dependencies = [ - "codegen 0.2.0", - "once_cell", - "serde", - "serde_json", - "vm", - "zksync_contracts", - "zksync_state", - "zksync_types", - "zksync_utils", -] - [[package]] name = "tagptr" version = "0.2.0" @@ -6355,7 +6227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 1.0.109", ] @@ -6390,7 +6262,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -6523,16 +6395,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.1.0" @@ -6540,7 +6402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -6641,49 +6503,6 @@ dependencies = [ "winnow", ] -[[package]] -name = "tonic" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08f4649d10a70ffa3522ca559031285d8e421d727ac85c60825761818f5d0a" -dependencies = [ - "async-stream", - "async-trait", - "base64 0.13.1", - "bytes 1.4.0", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio", - "tokio-stream", - "tokio-util 0.6.10", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic-build" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" -dependencies = [ - "proc-macro2 1.0.66", - "prost-build", - "quote 1.0.28", - "syn 1.0.109", -] - [[package]] name = "tower" version = "0.4.13" @@ -6767,7 +6586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", ] @@ -6781,16 +6600,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.1.3" @@ -6802,20 +6611,6 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - "tracing", - "tracing-core", - 
"tracing-log", - "tracing-subscriber", -] - [[package]] name = "tracing-serde" version = "0.1.3" @@ -7024,18 +6819,49 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "vise" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vise.git?rev=856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c#856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c" +dependencies = [ + "elsa", + "linkme", + "once_cell", + "prometheus-client", + "vise-macros", +] + +[[package]] +name = "vise-exporter" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vise.git?rev=856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c#856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c" +dependencies = [ + "hyper", + "metrics-exporter-prometheus", + "once_cell", + "tokio", + "tracing", + "vise", +] + +[[package]] +name = "vise-macros" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vise.git?rev=856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c#856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.33", + "syn 2.0.27", +] + [[package]] name = "vlog" -version = "1.0.0" +version = "0.1.0" dependencies = [ "chrono", - "opentelemetry", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", "sentry", "serde_json", "tracing", - "tracing-opentelemetry", "tracing-subscriber", ] @@ -7051,13 +6877,13 @@ dependencies = [ "once_cell", "thiserror", "tokio", - "vlog", - "zk_evm 1.3.3", - "zkevm-assembly 1.3.2", + "tracing", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", "zksync_config", "zksync_contracts", "zksync_eth_signer", "zksync_state", + "zksync_test_account", "zksync_types", "zksync_utils", ] @@ -7079,11 +6905,33 @@ name = "vm-benchmark-harness" version = "0.1.0" dependencies = [ "once_cell", - "ouroboros", "vm", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", + "zksync_config", + "zksync_contracts", + "zksync_state", + "zksync_types", + "zksync_utils", +] + +[[package]] +name = "vm_1_3_2" +version = "0.1.0" +dependencies = [ + "anyhow", + "ethabi", + "hex", + "itertools", + "metrics", + "once_cell", + "thiserror", + "tokio", + "tracing", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", + "zkevm-assembly 1.3.2", "zksync_config", "zksync_contracts", + "zksync_eth_signer", "zksync_state", "zksync_types", "zksync_utils", @@ -7195,7 +7043,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", "wasm-bindgen-shared", ] @@ -7218,7 +7066,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.28", + "quote 1.0.33", "wasm-bindgen-macro-support", ] @@ -7229,7 +7077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.28", + "quote 1.0.33", "syn 2.0.27", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -7323,17 +7171,6 @@ dependencies = [ "rustls-webpki 0.101.1", ] -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - 
[[package]] name = "whoami" version = "1.4.0" @@ -7604,7 +7441,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zk_evm" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.1#877ba31cc1d82316fd924e8d83a9f5f1a77b1b9a" +source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.1-rc0#877ba31cc1d82316fd924e8d83a9f5f1a77b1b9a" dependencies = [ "blake2 0.10.6", "k256", @@ -7633,10 +7470,25 @@ dependencies = [ "zkevm_opcode_defs 1.3.2", ] +[[package]] +name = "zk_evm" +version = "1.3.3" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fe8215a7047d24430ad470cf15a19bedb4d6ba0b" +dependencies = [ + "anyhow", + "lazy_static", + "num 0.4.0", + "serde", + "serde_json", + "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs 1.3.2", +] + [[package]] name = "zk_evm_abstractions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#973a1f661c045e0e8b9a287505f353659279b3b3" +source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#7502a661d7d38906d849dcd3e7a15e5848af6581" dependencies = [ "anyhow", "serde", @@ -7647,7 +7499,7 @@ dependencies = [ [[package]] name = "zkevm-assembly" version = "1.3.1" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.1#dabbb07e84dd886ee90dde2b5dde0acbf9b0123a" +source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?tag=v1.3.1-rc0#dabbb07e84dd886ee90dde2b5dde0acbf9b0123a" dependencies = [ "env_logger", "hex", @@ -7666,7 +7518,7 @@ dependencies = [ [[package]] name = "zkevm-assembly" version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#edc364e59a2eea9c4b1d4ce79f15d0b7c6b55b98" +source = "git+https://github.com/matter-labs/era-zkEVM-assembly.git?branch=v1.3.2#3c61d450cbe6548068be8f313ed02f1bd229a865" dependencies = [ "env_logger", "hex", @@ -7696,7 +7548,7 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#2147cca9dce12749149739f6c750f66c2c43a055" +source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#a40648727910621b46a1023d940befefb0469cba" dependencies = [ "bitflags 2.3.2", "blake2 0.10.6", @@ -7710,7 +7562,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?tag=v1.3.3-rc1#46391e83330c77f6d79ff7c5fc19bae3d5b3ab82" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.3.3#46391e83330c77f6d79ff7c5fc19bae3d5b3ab82" dependencies = [ "bincode", "circuit_testing", @@ -7730,7 +7582,7 @@ dependencies = [ "sync_vm", "test-log", "tracing", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc0)", "zkevm-assembly 1.3.2", ] @@ -7754,7 +7606,7 @@ dependencies = [ [[package]] name = "zksync_basic_types" -version = "1.0.0" +version = "0.1.0" dependencies = [ "serde", "web3", @@ -7762,8 +7614,9 @@ dependencies = [ [[package]] name = "zksync_circuit_breaker" -version = "1.0.0" +version = "0.1.0" dependencies = [ + "anyhow", "assert_matches", "async-trait", "backon", @@ -7782,10 +7635,11 @@ dependencies = [ [[package]] name = "zksync_config" -version = "1.0.0" +version = "0.1.0" dependencies = [ "bigdecimal", "envy", + "hex", "num 0.3.1", "once_cell", "serde", @@ -7815,6 +7669,7 @@ dependencies = [ 
"tempfile", "thiserror", "tokio", + "tracing", "vlog", "zksync_config", "zksync_contracts", @@ -7826,8 +7681,9 @@ dependencies = [ [[package]] name = "zksync_contracts" -version = "1.0.0" +version = "0.1.0" dependencies = [ + "envy", "ethabi", "hex", "once_cell", @@ -7838,7 +7694,7 @@ dependencies = [ [[package]] name = "zksync_core" -version = "1.0.0" +version = "0.1.0" dependencies = [ "actix-cors", "actix-rt", @@ -7896,6 +7752,7 @@ dependencies = [ "zksync_queued_job_processor", "zksync_state", "zksync_storage", + "zksync_test_account", "zksync_types", "zksync_utils", "zksync_verification_key_generator_and_server", @@ -7904,7 +7761,7 @@ dependencies = [ [[package]] name = "zksync_crypto" -version = "1.0.0" +version = "0.1.0" dependencies = [ "base64 0.13.1", "blake2 0.10.6", @@ -7919,7 +7776,7 @@ dependencies = [ [[package]] name = "zksync_dal" -version = "1.0.0" +version = "0.1.0" dependencies = [ "anyhow", "assert_matches", @@ -7937,7 +7794,7 @@ dependencies = [ "strum", "thiserror", "tokio", - "vlog", + "tracing", "zksync_config", "zksync_contracts", "zksync_health_check", @@ -7947,7 +7804,7 @@ dependencies = [ [[package]] name = "zksync_eth_client" -version = "1.0.0" +version = "0.1.0" dependencies = [ "anyhow", "async-trait", @@ -7957,7 +7814,7 @@ dependencies = [ "serde", "thiserror", "tokio", - "vlog", + "tracing", "zksync_config", "zksync_contracts", "zksync_eth_signer", @@ -7966,7 +7823,7 @@ dependencies = [ [[package]] name = "zksync_eth_signer" -version = "1.0.0" +version = "0.1.0" dependencies = [ "actix-rt", "actix-web", @@ -7988,14 +7845,16 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "1.0.0" +version = "0.1.0" dependencies = [ "anyhow", + "clap 4.3.4", "envy", "futures 0.3.28", "prometheus_exporter", "serde", "tokio", + "tracing", "url", "vlog", "zksync_basic_types", @@ -8021,26 +7880,26 @@ dependencies = [ "serde", "serde_json", "tokio", + "tracing", ] [[package]] name = "zksync_mempool" -version = "1.0.0" +version = "0.1.0" dependencies = [ "metrics", - "vlog", + "tracing", "zksync_types", ] [[package]] name = "zksync_merkle_tree" -version = "1.0.0" +version = "0.1.0" dependencies = [ "assert_matches", "clap 4.3.4", "insta", "leb128", - "metrics", "once_cell", "rand 0.8.5", "rayon", @@ -8049,7 +7908,8 @@ dependencies = [ "serde_with", "tempfile", "thiserror", - "vlog", + "tracing", + "vise", "zksync_config", "zksync_crypto", "zksync_storage", @@ -8058,7 +7918,7 @@ dependencies = [ [[package]] name = "zksync_mini_merkle_tree" -version = "1.0.0" +version = "0.1.0" dependencies = [ "criterion", "once_cell", @@ -8068,7 +7928,7 @@ dependencies = [ [[package]] name = "zksync_object_store" -version = "1.0.0" +version = "0.1.0" dependencies = [ "async-trait", "bincode", @@ -8078,15 +7938,16 @@ dependencies = [ "metrics", "tempdir", "tokio", - "vlog", + "tracing", "zksync_config", "zksync_types", ] [[package]] name = "zksync_prover_utils" -version = "1.0.0" +version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "ctrlc", "futures 0.3.28", @@ -8095,50 +7956,69 @@ dependencies = [ "reqwest", "tokio", "toml_edit 0.14.4", - "vlog", + "tracing", "zksync_config", + "zksync_object_store", + "zksync_types", "zksync_utils", ] [[package]] name = "zksync_queued_job_processor" -version = "1.0.0" +version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "tokio", - "vlog", + "tracing", "zksync_utils", ] [[package]] name = "zksync_state" -version = "1.0.0" +version = "0.1.0" dependencies = [ + "anyhow", "db_test_macro", "metrics", "mini-moka", + "rand 
0.8.5", "tempfile", "tokio", - "vlog", + "tracing", "zksync_dal", "zksync_storage", "zksync_types", + "zksync_utils", ] [[package]] name = "zksync_storage" -version = "1.0.0" +version = "0.1.0" dependencies = [ "metrics", "num_cpus", "rocksdb", "tempfile", - "vlog", + "tracing", +] + +[[package]] +name = "zksync_test_account" +version = "0.1.0" +dependencies = [ + "ethabi", + "hex", + "zksync_config", + "zksync_contracts", + "zksync_eth_signer", + "zksync_types", + "zksync_utils", ] [[package]] name = "zksync_types" -version = "1.0.0" +version = "0.1.0" dependencies = [ "blake2 0.10.6", "chrono", @@ -8158,7 +8038,7 @@ dependencies = [ "strum", "thiserror", "tokio", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", "zkevm_test_harness", "zksync_basic_types", "zksync_config", @@ -8169,7 +8049,7 @@ dependencies = [ [[package]] name = "zksync_utils" -version = "1.0.0" +version = "0.1.0" dependencies = [ "anyhow", "bigdecimal", @@ -8184,14 +8064,15 @@ dependencies = [ "serde_json", "thiserror", "tokio", + "tracing", "vlog", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", "zksync_basic_types", ] [[package]] name = "zksync_verification_key_generator_and_server" -version = "1.0.0" +version = "0.1.0" dependencies = [ "bincode", "circuit_testing", @@ -8201,6 +8082,7 @@ dependencies = [ "once_cell", "serde_json", "structopt", + "tracing", "vlog", "zksync_prover_utils", "zksync_types", @@ -8208,7 +8090,7 @@ dependencies = [ [[package]] name = "zksync_web3_decl" -version = "1.0.0" +version = "0.1.0" dependencies = [ "bigdecimal", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 5c47da13111f..547f0055f348 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,24 +1,15 @@ [workspace] members = [ - # Prover - # We don't include `prove` image here since it cannot be built with stable rust - # and we don't want to use nightly for the whole workspace - # "core/bin/prover", # Server "core/bin/zksync_core", "core/bin/external_node", - "core/bin/system-constants-generator", + # "core/bin/system-constants-generator", "core/tests/cross_external_nodes_checker", # Contract verifier "core/bin/contract-verifier", - # Setup key generator and server: its commented as it cannot be built with stable rust. - # "core/bin/setup_key_generator_and_server", # Verification key generator and server "core/bin/verification_key_generator_and_server", - #"core/bin/witness_generator", - # circuit synthesizer: its commented as it cannot be built with stable rust. - # "core/bin/circuit_synthesizer", - "core/bin/enumeration_indices_migration", + "core/bin/storage_logs_dedup_migration", # Libraries "core/lib/basic_types", "core/lib/config", @@ -31,8 +22,8 @@ members = [ "core/lib/eth_signer", "core/lib/mempool", "core/lib/merkle_tree", - "core/lib/object_store", "core/lib/mini_merkle_tree", + "core/lib/object_store", "core/lib/prometheus_exporter", "core/lib/queued_job_processor", "core/lib/state", @@ -41,9 +32,10 @@ members = [ "core/lib/prover_utils", "core/lib/utils", "core/lib/vlog", - "core/lib/vm", "core/lib/multivm", + "core/lib/vm", "core/lib/web3_decl", + "core/lib/test_account", # MultiVM dependencies "core/multivm_deps/vm_m5", diff --git a/README.md b/README.md index d6e2e81b6837..25030ac34216 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,11 @@ The following questions will be answered by the following resources: | What is the logical project structure and architecture? 
| [architecture.md](docs/architecture.md) | | Where can I find developer docs? | [docs](https://v2-docs.zksync.io/dev/) | +## Policies + +- [Security policy](.github/SECURITY.md) +- [Contribution policy](CONTRIBUTING.md) + ## License zkSync Era is distributed under the terms of either @@ -32,6 +37,7 @@ at your option. - [Website](https://zksync.io/) - [GitHub](https://github.com/matter-labs) +- [ZK Credo](https://github.com/zksync/credo) - [Twitter](https://twitter.com/zksync) - [Twitter for Devs](https://twitter.com/zkSyncDevs) - [Discord](https://discord.gg/nMaPGrDDwk) diff --git a/contracts b/contracts index 0697bcc39547..67f38daa255c 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 0697bcc395476402ffdcdc698c5c963ad7ddceba +Subproject commit 67f38daa255c31e778e9a6f8c673f36914513fa1 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 0ab823892422..06ee1328d4ba 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,279 @@ # Changelog +## [9.0.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.7.0...core-v9.0.0) (2023-09-21) + + +### ⚠ BREAKING CHANGES + +* update verification keys, protocol version 15 ([#2602](https://github.com/matter-labs/zksync-2-dev/issues/2602)) + +### Features + +* **contracts:** Allow reading contracts code from other directories ([#2613](https://github.com/matter-labs/zksync-2-dev/issues/2613)) ([1481eb8](https://github.com/matter-labs/zksync-2-dev/commit/1481eb84cbac891586a41d6d9739ae343e3c1bb8)) +* make data returned from the VM to have arbitrary length ([#2479](https://github.com/matter-labs/zksync-2-dev/issues/2479)) ([9251690](https://github.com/matter-labs/zksync-2-dev/commit/92516901cb2db61987554ddf0f8e080a15e7e72e)) +* **prover-fri:** added picked-by column in prover fri related tables ([#2600](https://github.com/matter-labs/zksync-2-dev/issues/2600)) ([9e604ab](https://github.com/matter-labs/zksync-2-dev/commit/9e604abf3bae11b6f583f2abd39c07a85dc20f0a)) +* update verification keys, protocol version 15 ([#2602](https://github.com/matter-labs/zksync-2-dev/issues/2602)) ([2fff59b](https://github.com/matter-labs/zksync-2-dev/commit/2fff59bab00849996864b68e932739135337ebd7)) +* **vlog:** Rework the observability configuration subsystem ([#2608](https://github.com/matter-labs/zksync-2-dev/issues/2608)) ([377f0c5](https://github.com/matter-labs/zksync-2-dev/commit/377f0c5f734c979bc990b429dff0971466872e71)) +* **vm:** Multivm tracer support ([#2601](https://github.com/matter-labs/zksync-2-dev/issues/2601)) ([4a7467b](https://github.com/matter-labs/zksync-2-dev/commit/4a7467b1b1556bfd795792dbe280bcf28c93a58f)) + +## [8.7.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.6.0...core-v8.7.0) (2023-09-19) + + +### Features + +* Rework metrics approach ([#2387](https://github.com/matter-labs/zksync-2-dev/issues/2387)) ([4855546](https://github.com/matter-labs/zksync-2-dev/commit/48555465d32f8524f6cf488859e8ae8259ecf5da)) + + +### Bug Fixes + +* **db:** Vacuum `storage_logs` table along with removing duplicate rows ([#2583](https://github.com/matter-labs/zksync-2-dev/issues/2583)) ([84f472d](https://github.com/matter-labs/zksync-2-dev/commit/84f472deb14a92bd2b90f8160c9316d21b646ff4)) +* **prover_fri:** drop not null constraint from proof_compression_jobs_fri fri_proof_blob_url column ([#2590](https://github.com/matter-labs/zksync-2-dev/issues/2590)) ([5e41fee](https://github.com/matter-labs/zksync-2-dev/commit/5e41fee69c869c53d999c3ee53e3e7dd6735e603)) +* **state-keeper:** deduplication logic 
([#2597](https://github.com/matter-labs/zksync-2-dev/issues/2597)) ([7122a2b](https://github.com/matter-labs/zksync-2-dev/commit/7122a2b0cc33a96a4c117186437db8e290388356)) +* **storage:** Ignore non-committed factory deps in RocksDB ([#2585](https://github.com/matter-labs/zksync-2-dev/issues/2585)) ([b3da824](https://github.com/matter-labs/zksync-2-dev/commit/b3da82483639728bd899fb9388b9b2868cb28159)) +* **vm:** Handle near call gas correctly ([#2587](https://github.com/matter-labs/zksync-2-dev/issues/2587)) ([c925259](https://github.com/matter-labs/zksync-2-dev/commit/c9252597ce330d0c982365bb703c373191d03506)) + +## [8.6.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.5.0...core-v8.6.0) (2023-09-15) + + +### Features + +* **prover-fri:** insert missing protocol version in FRI witness-gen table ([#2577](https://github.com/matter-labs/zksync-2-dev/issues/2577)) ([b9af6a5](https://github.com/matter-labs/zksync-2-dev/commit/b9af6a5784b0e6538bd542830593d16f3caf5fe5)) +* **prover-server-api:** Add SkippedProofGeneration in SubmitProofRequest ([#2575](https://github.com/matter-labs/zksync-2-dev/issues/2575)) ([9c2653e](https://github.com/matter-labs/zksync-2-dev/commit/9c2653e5bc0e56b2906e9d25be3cb2887ad7d35d)) + +## [8.5.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.4.0...core-v8.5.0) (2023-09-15) + + +### Features + +* **API:** enable request translator for ws api ([#2568](https://github.com/matter-labs/zksync-2-dev/issues/2568)) ([ccb6cad](https://github.com/matter-labs/zksync-2-dev/commit/ccb6cad57c9ba0ca58114701c256dbc44a457459)) +* Use tracing directly instead of vlog macros ([#2566](https://github.com/matter-labs/zksync-2-dev/issues/2566)) ([53d53af](https://github.com/matter-labs/zksync-2-dev/commit/53d53afc9157214fb911aa0934a97f8b5103e1ec)) +* **witness-generator:** Get wit inputs from MerklePath instead of SK ([#2559](https://github.com/matter-labs/zksync-2-dev/issues/2559)) ([da1c2fa](https://github.com/matter-labs/zksync-2-dev/commit/da1c2fa2b043bc4e31075a0454dc0e09937c93ad)) + + +### Bug Fixes + +* Do not automatically emit sentry events on vlog::error ([#2560](https://github.com/matter-labs/zksync-2-dev/issues/2560)) ([aebcd86](https://github.com/matter-labs/zksync-2-dev/commit/aebcd8634a0984aaf357b03d925932807848b6b8)) +* filter_near_call performance ([#2523](https://github.com/matter-labs/zksync-2-dev/issues/2523)) ([eccb06b](https://github.com/matter-labs/zksync-2-dev/commit/eccb06b649621b6866476c6c5a95545e3359d79b)) + +## [8.4.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.3.1...core-v8.4.0) (2023-09-14) + + +### Features + +* **API:** new translator for virtual blocks for zks_getLogs endpoint ([#2505](https://github.com/matter-labs/zksync-2-dev/issues/2505)) ([35b0553](https://github.com/matter-labs/zksync-2-dev/commit/35b05537dc8fecf11be477bd156da332d75b1320)) +* **contract-verifier:** Add zkvyper v1.3.10 ([#2554](https://github.com/matter-labs/zksync-2-dev/issues/2554)) ([711c5db](https://github.com/matter-labs/zksync-2-dev/commit/711c5db4bd48e9b4b166256e8c9554ef0e54fad8)) +* **contract-verifier:** Support verification for zksolc v1.3.14 ([#2546](https://github.com/matter-labs/zksync-2-dev/issues/2546)) ([adea3ef](https://github.com/matter-labs/zksync-2-dev/commit/adea3efd39099ef9599e24d47de6c7cffe6b0287)) +* Decrease crate versions back to 0.1.0 ([#2528](https://github.com/matter-labs/zksync-2-dev/issues/2528)) ([adb7614](https://github.com/matter-labs/zksync-2-dev/commit/adb76142882dde197cd64b1aaaffb01906427054)) +* 
**prover-fri:** Restrict prover to pick jobs for which they have vk's ([#2541](https://github.com/matter-labs/zksync-2-dev/issues/2541)) ([cedba03](https://github.com/matter-labs/zksync-2-dev/commit/cedba03ea66fc0da479e60d5ca30d8f67e32358a)) +* **vm:** Make execute interface more obvious ([#2536](https://github.com/matter-labs/zksync-2-dev/issues/2536)) ([4cb18cb](https://github.com/matter-labs/zksync-2-dev/commit/4cb18cb06e87628ad122fc9857c789d1411a7f77)) + + +### Bug Fixes + +* **multi_vm:** Fix executing eth_call for old vm version ([#2558](https://github.com/matter-labs/zksync-2-dev/issues/2558)) ([0f3b990](https://github.com/matter-labs/zksync-2-dev/commit/0f3b990735caab8c905a9b51256608f4f7614ff1)) +* **vm:** Add trait for convinient usage of tracers ([#2550](https://github.com/matter-labs/zksync-2-dev/issues/2550)) ([bc2ed11](https://github.com/matter-labs/zksync-2-dev/commit/bc2ed1188cf545cfae1266302f1d5c2ef1feab0f)) + + +### Performance Improvements + +* **state-keeper:** only persist unique storage logs per miniblock ([#1793](https://github.com/matter-labs/zksync-2-dev/issues/1793)) ([d0ef78b](https://github.com/matter-labs/zksync-2-dev/commit/d0ef78b294c4e29692170c9b244414c7a5b9aa6c)) + +## [8.3.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.3.0...core-v8.3.1) (2023-09-12) + + +### Bug Fixes + +* **house-keeper:** remove extra ! from status column ([#2539](https://github.com/matter-labs/zksync-2-dev/issues/2539)) ([583dadb](https://github.com/matter-labs/zksync-2-dev/commit/583dadb91885e664b79b299fc2cd84d5077cc2cd)) + +## [8.3.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.2.1...core-v8.3.0) (2023-09-11) + + +### Features + +* **api:** Report some metrics more often ([#2519](https://github.com/matter-labs/zksync-2-dev/issues/2519)) ([eede188](https://github.com/matter-labs/zksync-2-dev/commit/eede188f6160fa383496c7c8ae8409c68bc54114)) +* **housekeeper:** add proof compressor retry and queued jobs reporting ([#2526](https://github.com/matter-labs/zksync-2-dev/issues/2526)) ([4321545](https://github.com/matter-labs/zksync-2-dev/commit/432154527dc85a17fc83c9e866b772a8d6f47673)) +* **metrics:** add more metrics to dry run ([#2529](https://github.com/matter-labs/zksync-2-dev/issues/2529)) ([0abdbb8](https://github.com/matter-labs/zksync-2-dev/commit/0abdbb8bd3229d2907f1f82493b2cf8e7a6a3254)) +* **vm:** New vm intregration ([#2198](https://github.com/matter-labs/zksync-2-dev/issues/2198)) ([f5e7e7a](https://github.com/matter-labs/zksync-2-dev/commit/f5e7e7a6fa81ab46289016f57a6123ffec83bcf6)) + + +### Bug Fixes + +* **vm:** Add bootloader tip execution mode ([#2535](https://github.com/matter-labs/zksync-2-dev/issues/2535)) ([2d64a3a](https://github.com/matter-labs/zksync-2-dev/commit/2d64a3a0947d131a4f9baf57afd1e26bccbc7b81)) + + +### Performance Improvements + +* **db:** Support previous blocks in VM values cache ([#2474](https://github.com/matter-labs/zksync-2-dev/issues/2474)) ([5eb32c5](https://github.com/matter-labs/zksync-2-dev/commit/5eb32c588b4ae1c85ef8fc95f70e03921eb19625)) + +## [8.2.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.2.0...core-v8.2.1) (2023-09-07) + + +### Bug Fixes + +* **api:** miniblock_hash loading ([#2513](https://github.com/matter-labs/zksync-2-dev/issues/2513)) ([c553dae](https://github.com/matter-labs/zksync-2-dev/commit/c553daeca49a943a323cefa2017808e6c06728e9)) + +## [8.2.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.1.1...core-v8.2.0) (2023-09-06) + + +### Features + +* 
**prover-fri-compressor:** Create a dedicated component for FRI proof conversion ([#2501](https://github.com/matter-labs/zksync-2-dev/issues/2501)) ([cd43aa7](https://github.com/matter-labs/zksync-2-dev/commit/cd43aa73095bf97b54c9fbcc9934128cc29506c2)) +* **prover-fri:** use separate object store config for FRI prover components ([#2494](https://github.com/matter-labs/zksync-2-dev/issues/2494)) ([7f2537f](https://github.com/matter-labs/zksync-2-dev/commit/7f2537fc987c55b6efec925506478d665d20c0c4)) +* **witness-generator:** Add Witness Storage, later used in Wit Gen ([#2509](https://github.com/matter-labs/zksync-2-dev/issues/2509)) ([c78ddf3](https://github.com/matter-labs/zksync-2-dev/commit/c78ddf33e7de929fd369472e8892a2a83f2b0ac2)) +* **witness-generator:** Basic Wit Gen uses multiple Storage backends ([#2510](https://github.com/matter-labs/zksync-2-dev/issues/2510)) ([1dc1f1c](https://github.com/matter-labs/zksync-2-dev/commit/1dc1f1c4e65f0f49c63a654c64596dc085911791)) + + +### Bug Fixes + +* **api:** Use multivm bootloader in debug ([#2504](https://github.com/matter-labs/zksync-2-dev/issues/2504)) ([ae2a357](https://github.com/matter-labs/zksync-2-dev/commit/ae2a357f38a57498ef527f2ccbce1d32b9b3f7b5)) +* **witnes-gen:** Fix getting bootloader memory ([#2507](https://github.com/matter-labs/zksync-2-dev/issues/2507)) ([bb8f894](https://github.com/matter-labs/zksync-2-dev/commit/bb8f89472432e9b11c538881f27dda8afdf46a4f)) +* **witness-generator:** Add Data Source config for Basic Witness Gens ([#2502](https://github.com/matter-labs/zksync-2-dev/issues/2502)) ([9126597](https://github.com/matter-labs/zksync-2-dev/commit/91265973d0eabb34e056277cd2aa730c05a9c06f)) + +## [8.1.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.1.0...core-v8.1.1) (2023-09-06) + + +### Performance Improvements + +* **db:** Add `miniblocks_pending_batch` DB index ([#2496](https://github.com/matter-labs/zksync-2-dev/issues/2496)) ([dc20057](https://github.com/matter-labs/zksync-2-dev/commit/dc200570f62bb52de5fa798a353f08fae0a3fc71)) + +## [8.1.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.0.0...core-v8.1.0) (2023-09-05) + + +### Features + +* **genesis:** make it possible to create genesis block with given protocol version ([#2471](https://github.com/matter-labs/zksync-2-dev/issues/2471)) ([430de1f](https://github.com/matter-labs/zksync-2-dev/commit/430de1f4ed59e9bc1eeb029dacdf88684c34a1ad)) + + +### Bug Fixes + +* **api:** Zeroes in eth_feeHistory ([#2490](https://github.com/matter-labs/zksync-2-dev/issues/2490)) ([67cd433](https://github.com/matter-labs/zksync-2-dev/commit/67cd433f57e01fdf94da7461c6b76a6948815212)) +* **db:** Fix `get_expected_l1_batch_timestamp` query ([#2492](https://github.com/matter-labs/zksync-2-dev/issues/2492)) ([660ae98](https://github.com/matter-labs/zksync-2-dev/commit/660ae98d34b48f8c97c50c8c7988049e50d90297)) + +## [8.0.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v7.2.0...core-v8.0.0) (2023-09-05) + + +### ⚠ BREAKING CHANGES + +* Fix a bug for the ending of the upgrade ([#2478](https://github.com/matter-labs/zksync-2-dev/issues/2478)) + +### Features + +* **prover-fri:** Add protocol version for FRI prover related tables ([#2458](https://github.com/matter-labs/zksync-2-dev/issues/2458)) ([784a52b](https://github.com/matter-labs/zksync-2-dev/commit/784a52bc2d2fa784fe82cc10df1d39895255ade5)) + + +### Bug Fixes + +* **api:** Use MultiVM in API ([#2476](https://github.com/matter-labs/zksync-2-dev/issues/2476)) 
([683582d](https://github.com/matter-labs/zksync-2-dev/commit/683582dab2fb26d09a5e183ac9e4d0d9e61286e4)) +* Fix a bug for the ending of the upgrade ([#2478](https://github.com/matter-labs/zksync-2-dev/issues/2478)) ([5fbad97](https://github.com/matter-labs/zksync-2-dev/commit/5fbad971af10240feaa8da3062dcf7c98aca3f02)) + +## [7.2.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v7.1.0...core-v7.2.0) (2023-09-01) + + +### Features + +* **api:** Implement rate-limiting for WebSocket jsonrpc backend ([#2468](https://github.com/matter-labs/zksync-2-dev/issues/2468)) ([db86c11](https://github.com/matter-labs/zksync-2-dev/commit/db86c11caf1c63de6fa6be5031636d95125fa6c9)) +* **api:** make gas per pubdata field non-optional in SDK and API for transaction details endpoint ([#2431](https://github.com/matter-labs/zksync-2-dev/issues/2431)) ([4c3636a](https://github.com/matter-labs/zksync-2-dev/commit/4c3636a33af345046d5a78ee8b65b1a4d1066e98)) + + +### Bug Fixes + +* debug and fix local node setup ([#2408](https://github.com/matter-labs/zksync-2-dev/issues/2408)) ([4f3a9e6](https://github.com/matter-labs/zksync-2-dev/commit/4f3a9e695c868a181c7ecd1dbbd647b1a2a74a4f)) + +## [7.1.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v7.0.0...core-v7.1.0) (2023-09-01) + + +### Features + +* **api:** Support batch request size limiting in jsonrpc ([#2461](https://github.com/matter-labs/zksync-2-dev/issues/2461)) ([287d360](https://github.com/matter-labs/zksync-2-dev/commit/287d360d03914adf3e15c115470709abadf4585c)) +* **prover-gateway:** integrate snark wrapper to transform FRI proof to the old format ([#2413](https://github.com/matter-labs/zksync-2-dev/issues/2413)) ([60bb26b](https://github.com/matter-labs/zksync-2-dev/commit/60bb26bdc31f13f2b9253b245e848951e8e6e501)) +* **witness_generator:** Add flagged upload path on state keeper ([#2448](https://github.com/matter-labs/zksync-2-dev/issues/2448)) ([10b78cb](https://github.com/matter-labs/zksync-2-dev/commit/10b78cb6b31e2bfe6c84d6cb76f3228003e44ae7)) + + +### Bug Fixes + +* **en:** Set correct hashes for old blocks ([#2463](https://github.com/matter-labs/zksync-2-dev/issues/2463)) ([aa5d0b1](https://github.com/matter-labs/zksync-2-dev/commit/aa5d0b126b8e68a8f4e8da874611165acf145a73)) +* **en:** Set correct version for upgrade ([#2464](https://github.com/matter-labs/zksync-2-dev/issues/2464)) ([44e5f32](https://github.com/matter-labs/zksync-2-dev/commit/44e5f32b910917f1661fbd0139f2ba35cbc9eca0)) +* **state-keeper:** Calculate miniblock hash based on protocol version ([#2462](https://github.com/matter-labs/zksync-2-dev/issues/2462)) ([01bee1d](https://github.com/matter-labs/zksync-2-dev/commit/01bee1dcd1c398374253bb8b40ab9385d9fd8547)) + +## [7.0.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v6.2.0...core-v7.0.0) (2023-08-30) + + +### ⚠ BREAKING CHANGES + +* **vm:** replace L1 batch number and timestamp with miniblock number and timestamp ([#1975](https://github.com/matter-labs/zksync-2-dev/issues/1975)) + +### Features + +* **prover-server-split:** insert batches to be proven on proof_generation_details table ([#2417](https://github.com/matter-labs/zksync-2-dev/issues/2417)) ([504c37f](https://github.com/matter-labs/zksync-2-dev/commit/504c37fc3aeab951b335b574508289fef33e1700)) +* **vm:** replace L1 batch number and timestamp with miniblock number and timestamp ([#1975](https://github.com/matter-labs/zksync-2-dev/issues/1975)) ([6814c7e](https://github.com/matter-labs/zksync-2-dev/commit/6814c7eafa0c6f29607e81acfffc70ac1fa5fa96)) 
+* **witness_generator:** Add upload_witness_inputs_to_gcs flag ([#2444](https://github.com/matter-labs/zksync-2-dev/issues/2444)) ([9f0b87e](https://github.com/matter-labs/zksync-2-dev/commit/9f0b87ef1b33defa71ef98ff2cd5fb66f6537837)) + + +### Bug Fixes + +* **api:** fix eth_call for old blocks ([#2440](https://github.com/matter-labs/zksync-2-dev/issues/2440)) ([19bba44](https://github.com/matter-labs/zksync-2-dev/commit/19bba4413f8f4197e2178e409106eecf12089d08)) +* **en:** Allow executed batch reversion ([#2442](https://github.com/matter-labs/zksync-2-dev/issues/2442)) ([a47b72d](https://github.com/matter-labs/zksync-2-dev/commit/a47b72db82527409e224467bfb07ca642426385f)) +* **en:** Insert protocol version for pending batch ([#2450](https://github.com/matter-labs/zksync-2-dev/issues/2450)) ([dd0792e](https://github.com/matter-labs/zksync-2-dev/commit/dd0792ea200f255c9f9f3e55924cb7caf0452b89)) +* **en:** Set protocol version for pending blocks in EN ([#2443](https://github.com/matter-labs/zksync-2-dev/issues/2443)) ([7464395](https://github.com/matter-labs/zksync-2-dev/commit/746439527be10ad513607094953ba7523316b843)) + +## [6.2.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v6.1.0...core-v6.2.0) (2023-08-28) + + +### Features + +* **hyperchain:** hyperchain script ([#2410](https://github.com/matter-labs/zksync-2-dev/issues/2410)) ([52b63d3](https://github.com/matter-labs/zksync-2-dev/commit/52b63d348f634a4434d21aa2b1955e55859556d6)) + + +### Bug Fixes + +* **api:** Revert `ProtocolVersionId` serialization ([#2425](https://github.com/matter-labs/zksync-2-dev/issues/2425)) ([e2eee91](https://github.com/matter-labs/zksync-2-dev/commit/e2eee9121961fae234e8228c35ef4265b1328cf1)) +* **db:** Fix prover_jobs indices ([#2416](https://github.com/matter-labs/zksync-2-dev/issues/2416)) ([4104e7e](https://github.com/matter-labs/zksync-2-dev/commit/4104e7e1e3bd5dfc3c46827e45527dc9a40b7757)) + +## [6.1.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v6.0.0...core-v6.1.0) (2023-08-25) + + +### Features + +* Add logging for health check changes ([#2401](https://github.com/matter-labs/zksync-2-dev/issues/2401)) ([3be83e5](https://github.com/matter-labs/zksync-2-dev/commit/3be83e5481f12745579a1d7e6c42d2fa27a0a566)) +* **api:** Measure difference from last miniblock for JSON-RPC APIs ([#2370](https://github.com/matter-labs/zksync-2-dev/issues/2370)) ([c706927](https://github.com/matter-labs/zksync-2-dev/commit/c706927d233935c20ac074968a82b10449eb4dff)) +* **eth-sender:** add support for loading new proofs from GCS ([#2392](https://github.com/matter-labs/zksync-2-dev/issues/2392)) ([54f6f53](https://github.com/matter-labs/zksync-2-dev/commit/54f6f53953ddd20c19a8d6de092700de2835ad33)) +* glue VM version, protocol version and EN ([#2411](https://github.com/matter-labs/zksync-2-dev/issues/2411)) ([c3768fc](https://github.com/matter-labs/zksync-2-dev/commit/c3768fc028afbd4b0ed8d005430a0d3a1ede72c1)) +* **prover-server-split:** consume API for fetching proof gen data and submitting proofs ([#2365](https://github.com/matter-labs/zksync-2-dev/issues/2365)) ([6e99471](https://github.com/matter-labs/zksync-2-dev/commit/6e994717086941fd2538fced7c32b4bb5eeb4eac)) + + +### Bug Fixes + +* **contract-verifier:** No panic when 2 parallel cvs insert same key ([#2396](https://github.com/matter-labs/zksync-2-dev/issues/2396)) ([f0d9081](https://github.com/matter-labs/zksync-2-dev/commit/f0d90815cc2a27b27f84c0aa434a16fff59f356f)) +* **en:** do not save protocol version for miniblocks/batches in EN 
([#2403](https://github.com/matter-labs/zksync-2-dev/issues/2403)) ([75bd867](https://github.com/matter-labs/zksync-2-dev/commit/75bd867079830d4519c1e20c4a53af749ecc325d)) +* **en:** Save system tx to en ([#2402](https://github.com/matter-labs/zksync-2-dev/issues/2402)) ([0bb50a5](https://github.com/matter-labs/zksync-2-dev/commit/0bb50a5b31d5e0960ed3dec84b21170d6ccfddad)) +* save protocol versions in prover DB ([#2384](https://github.com/matter-labs/zksync-2-dev/issues/2384)) ([0fc2195](https://github.com/matter-labs/zksync-2-dev/commit/0fc21952e630f56df582b79e071998564132f67f)) + +## [6.0.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.28.1...core-v6.0.0) (2023-08-18) + + +### ⚠ BREAKING CHANGES + +* new upgrade system ([#1784](https://github.com/matter-labs/zksync-2-dev/issues/1784)) + +### Features + +* Add shadow_storage to enable shadow state read ([#2366](https://github.com/matter-labs/zksync-2-dev/issues/2366)) ([3269d82](https://github.com/matter-labs/zksync-2-dev/commit/3269d82de20b205feec4e4056dad51cd28e14f8f)) +* **db:** Instrument key queries in DAL ([#2318](https://github.com/matter-labs/zksync-2-dev/issues/2318)) ([eb08ed6](https://github.com/matter-labs/zksync-2-dev/commit/eb08ed69db8655dd4e0d485597568c6e4e01e5bf)) +* new upgrade system ([#1784](https://github.com/matter-labs/zksync-2-dev/issues/1784)) ([469a4c3](https://github.com/matter-labs/zksync-2-dev/commit/469a4c332a4f02b5a642b2951fd00228c9317f59)) +* **prover-fri:** Add socket listener to receive witness vector over network ([#2367](https://github.com/matter-labs/zksync-2-dev/issues/2367)) ([19c9d89](https://github.com/matter-labs/zksync-2-dev/commit/19c9d89613a5d3ab5e55d9224a512a8952aa3689)) +* **prover-server-split:** expose API from server for requesting proof gen data and submitting proof ([#2292](https://github.com/matter-labs/zksync-2-dev/issues/2292)) ([401d0ab](https://github.com/matter-labs/zksync-2-dev/commit/401d0ab51bfce89203fd82b5f8d1a6865f6d19b0)) +* **witness-vector-generator:** Perform circuit synthesis on external machine ([#2351](https://github.com/matter-labs/zksync-2-dev/issues/2351)) ([6839f3f](https://github.com/matter-labs/zksync-2-dev/commit/6839f3fbfe472fb2fd492a9648bb97d1654bbb3b)) + + +### Bug Fixes + +* Return old paths for contract-verification API ([#2356](https://github.com/matter-labs/zksync-2-dev/issues/2356)) ([605a3ac](https://github.com/matter-labs/zksync-2-dev/commit/605a3ac0951241a892ee8f0832b69336299da6c6)) + + +### Performance Improvements + +* **db:** Optimize loading L1 batch header ([#2343](https://github.com/matter-labs/zksync-2-dev/issues/2343)) ([1274469](https://github.com/matter-labs/zksync-2-dev/commit/1274469b0618582d2027bc7b8dbda779486a553d)) + ## [5.28.1](https://github.com/matter-labs/zksync-2-dev/compare/core-v5.28.0...core-v5.28.1) (2023-08-10) diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index 6a57884dff7f..875f8559a26f 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -11,14 +11,14 @@ description = "The zkEVM contract verifier" publish = false # We don't want to publish our binaries. 
[dependencies] -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -zksync_contracts = { path = "../../lib/contracts", version = "1.0" } -zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_types = { path = "../../lib/types" } +zksync_dal = { path = "../../lib/dal" } +zksync_config = { path = "../../lib/config" } +zksync_contracts = { path = "../../lib/contracts" } +zksync_queued_job_processor = { path = "../../lib/queued_job_processor" } +zksync_utils = { path = "../../lib/utils" } +prometheus_exporter = { path = "../../lib/prometheus_exporter" } +vlog = { path = "../../lib/vlog" } tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } @@ -27,10 +27,11 @@ thiserror = "1.0" chrono = "0.4" serde_json = "1.0" ethabi = "18.0.0" -metrics = "0.20" +metrics = "0.21" hex = "0.4" serde = { version = "1.0", features = ["derive"] } structopt = "0.3.20" lazy_static = "1.4" tempfile = "3.0.2" regex = "1" +tracing = "0.1" diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index b806d4e09520..24f95d8dd4f8 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -1,5 +1,6 @@ use std::cell::RefCell; +use prometheus_exporter::PrometheusExporterConfig; use zksync_config::{configs::PrometheusConfig, ApiConfig, ContractVerifierConfig}; use zksync_dal::ConnectionPool; use zksync_queued_job_processor::JobProcessor; @@ -129,16 +130,32 @@ async fn main() { listener_port: verifier_config.prometheus_port, ..ApiConfig::from_env().prometheus }; - let pool = ConnectionPool::singleton(DbVariant::Master).build().await; - - vlog::init(); - let sentry_guard = vlog::init_sentry(); - match sentry_guard { - Some(_) => vlog::info!( - "Starting Sentry url: {}", - std::env::var("MISC_SENTRY_URL").unwrap(), - ), - None => vlog::info!("No sentry url configured"), + let pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .unwrap(); + + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let log_format = vlog::log_format_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let sentry_url = vlog::sentry_url_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let environment = vlog::environment_from_env(); + + let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + if let Some(sentry_url) = &sentry_url { + builder = builder + .with_sentry_url(sentry_url) + .expect("Invalid Sentry URL") + .with_sentry_environment(environment); + } + let _guard = builder.build(); + + // Report whether sentry is running after the logging subsystem was initialized. 
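+    // Logging earlier would be lost: no tracing subscriber is installed until `builder.build()` runs.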
+ if let Some(sentry_url) = sentry_url { + tracing::info!("Sentry configured with URL: {sentry_url}"); + } else { + tracing::info!("No sentry URL was provided"); } let (stop_sender, stop_receiver) = watch::channel(false); @@ -156,8 +173,13 @@ async fn main() { let contract_verifier = ContractVerifier::new(verifier_config, pool); let tasks = vec![ - tokio::spawn(contract_verifier.run(stop_receiver, opt.jobs_number)), - prometheus_exporter::run_prometheus_exporter(prometheus_config.listener_port, None), + // todo PLA-335: Leftovers after the prover DB split. + // The prover connection pool is not used by the contract verifier, but we need to pass it + // since `JobProcessor` trait requires it. + tokio::spawn(contract_verifier.run(stop_receiver.clone(), opt.jobs_number)), + tokio::spawn( + PrometheusExporterConfig::pull(prometheus_config.listener_port).run(stop_receiver), + ), ]; let particular_crypto_alerts = None; @@ -166,7 +188,7 @@ async fn main() { tokio::select! { _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = stop_signal_receiver.next() => { - vlog::info!("Stop signal received, shutting down"); + tracing::info!("Stop signal received, shutting down"); }, }; let _ = stop_sender.send(true); diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index e39c4796a2b0..7b8bd43fe944 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -63,7 +63,7 @@ impl ContractVerifier { .get_contract_info_for_verification(request.req.contract_address).await .unwrap() .ok_or_else(|| { - vlog::warn!("Contract is missing in DB for already accepted verification request. Contract address: {:#?}", request.req.contract_address); + tracing::warn!("Contract is missing in DB for already accepted verification request. 
Contract address: {:#?}", request.req.contract_address); ContractVerifierError::InternalError })?; let constructor_args = Self::decode_constructor_arguments_from_calldata( @@ -173,7 +173,7 @@ impl ContractVerifier { let bytecode = hex::decode(bytecode_str).unwrap(); let abi = contract["abi"].clone(); if !abi.is_array() { - vlog::error!( + tracing::error!( "zksolc returned unexpected value for ABI: {}", serde_json::to_string_pretty(&abi).unwrap() ); @@ -424,7 +424,7 @@ impl ContractVerifier { .save_verification_info(info) .await .unwrap(); - vlog::info!("Successfully processed request with id = {}", request_id); + tracing::info!("Successfully processed request with id = {}", request_id); } Err(error) => { let error_message = error.to_string(); @@ -439,7 +439,7 @@ impl ContractVerifier { .save_verification_error(request_id, error_message, compilation_errors, None) .await .unwrap(); - vlog::info!("Request with id = {} was failed", request_id); + tracing::info!("Request with id = {} was failed", request_id); } } } @@ -495,7 +495,7 @@ impl JobProcessor for ContractVerifier { ) -> tokio::task::JoinHandle<()> { let connection_pool = self.connection_pool.clone(); tokio::task::spawn(async move { - vlog::info!("Started to process request with id = {}", job.id); + tracing::info!("Started to process request with id = {}", job.id); let config: ContractVerifierConfig = ContractVerifierConfig::from_env(); let mut connection = connection_pool.access_storage().await; diff --git a/core/bin/enumeration_indices_migration/Cargo.toml b/core/bin/enumeration_indices_migration/Cargo.toml deleted file mode 100644 index db33b6f00767..000000000000 --- a/core/bin/enumeration_indices_migration/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "enumeration_indices_migration" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tokio = { version = "1" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_core = { path = "../../bin/zksync_core", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } diff --git a/core/bin/enumeration_indices_migration/src/main.rs b/core/bin/enumeration_indices_migration/src/main.rs deleted file mode 100644 index 86d581f379b7..000000000000 --- a/core/bin/enumeration_indices_migration/src/main.rs +++ /dev/null @@ -1,11 +0,0 @@ -use zksync_dal::connection::DbVariant; -use zksync_dal::ConnectionPool; - -#[tokio::main] -async fn main() { - vlog::init(); - - let pool = ConnectionPool::singleton(DbVariant::Master).build().await; - let mut storage = pool.access_storage().await; - zksync_core::state_keeper::set_missing_initial_writes_indices(&mut storage).await; -} diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index c8a0516bd1e3..f6f83c60af88 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_external_node" -version = "1.0.0" +version = "0.1.0" edition = "2021" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -11,20 +11,20 @@ categories = ["cryptography"] publish = false # We don't want to publish our binaries. 
[dependencies] -zksync_core = { path = "../zksync_core", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -zksync_storage = { path = "../../lib/storage", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -zksync_state = { path = "../../lib/state", version = "1.0" } -zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" } -zksync_contracts = { path = "../../lib/contracts", version = "1.0" } +zksync_core = { path = "../zksync_core" } +zksync_dal = { path = "../../lib/dal" } +zksync_config = { path = "../../lib/config" } +zksync_storage = { path = "../../lib/storage" } +zksync_utils = { path = "../../lib/utils" } +zksync_state = { path = "../../lib/state" } +zksync_basic_types = { path = "../../lib/basic_types" } +zksync_contracts = { path = "../../lib/contracts" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } -zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0" } -zksync_types = { path = "../../lib/types", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter" } +zksync_health_check = { path = "../../lib/health_check" } +zksync_web3_decl = { path = "../../lib/web3_decl" } +zksync_types = { path = "../../lib/types" } +vlog = { path = "../../lib/vlog" } anyhow = "1.0" tokio = { version = "1", features = ["time"] } @@ -32,3 +32,5 @@ futures = "0.3" serde = { version = "1.0", features = ["derive"] } envy = "0.4" url = "2.4" +clap = { version = "4.2.4", features = ["derive"] } +tracing = "0.1" diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index f6506c9f1dde..35b1e91bc08a 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -485,6 +485,7 @@ impl From for TxSenderConfig { // and they will be enforced by the main node anyway. 
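        // Setting the local limits to `u32::MAX` effectively disables these checks on the external node, so any transaction the main node already accepted is replayed as-is.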
max_allowed_l2_tx_gas_limit: u32::MAX, validation_computational_gas_limit: u32::MAX, + chain_id: config.remote.l2_chain_id, } } } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 28d60c4d3a44..a27f9bd19d22 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -1,10 +1,11 @@ use anyhow::Context; +use clap::Parser; use tokio::{sync::watch, task, time::sleep}; use std::{sync::Arc, time::Duration}; -use prometheus_exporter::run_prometheus_exporter; -use zksync_basic_types::Address; +use prometheus_exporter::PrometheusExporterConfig; +use zksync_basic_types::{Address, L2ChainId}; use zksync_core::{ api_server::{ execution_sandbox::VmConcurrencyLimiter, @@ -21,8 +22,7 @@ use zksync_core::{ reorg_detector::ReorgDetector, setup_sigint_handler, state_keeper::{ - L1BatchExecutorBuilder, MainBatchExecutorBuilder, MultiVMConfig, SealManager, - ZkSyncStateKeeper, + L1BatchExecutorBuilder, MainBatchExecutorBuilder, SealManager, ZkSyncStateKeeper, }, sync_layer::{ batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, @@ -50,7 +50,7 @@ async fn build_state_keeper( sync_state: SyncState, l2_erc20_bridge_addr: Address, stop_receiver: watch::Receiver, - use_multivm: bool, + chain_id: L2ChainId, ) -> ZkSyncStateKeeper { let en_sealer = ExternalNodeSealer::new(action_queue.clone()); let main_node_url = config.required.main_node_url().unwrap(); @@ -69,28 +69,13 @@ async fn build_state_keeper( // We only need call traces on the external node if the `debug_` namespace is enabled. let save_call_traces = config.optional.api_namespaces().contains(&Namespace::Debug); - // Only supply MultiVM config if the corresponding feature is enabled. - let multivm_config = use_multivm.then(|| { - vlog::error!( - "Using experimental MultiVM support! The feature is not ready, use at your own risk!" 
- ); - if main_node_url.contains("mainnet") { - MultiVMConfig::mainnet_config_wip() - } else if main_node_url.contains("testnet") { - MultiVMConfig::testnet_config_wip() - } else { - panic!("MultiVM can only be configured for mainnet/testnet now") - } - }); - let batch_executor_base: Box<dyn L1BatchExecutorBuilder> = Box::new(MainBatchExecutorBuilder::new( state_keeper_db_path, connection_pool.clone(), max_allowed_l2_tx_gas_limit, save_call_traces, - validation_computational_gas_limit, - multivm_config, + false, )); let io = Box::new( @@ -100,21 +85,25 @@ async fn build_state_keeper( sync_state, main_node_url, l2_erc20_bridge_addr, + validation_computational_gas_limit, + chain_id, ) .await, ); + io.recalculate_miniblock_hashes().await; + ZkSyncStateKeeper::new(stop_receiver, io, batch_executor_base, sealer) } async fn init_tasks( config: ExternalNodeConfig, connection_pool: ConnectionPool, -) -> ( - Vec<task::JoinHandle<()>>, +) -> anyhow::Result<( + Vec<task::JoinHandle<anyhow::Result<()>>>, watch::Sender<bool>, HealthCheckHandle, -) { +)> { let main_node_url = config .required .main_node_url() @@ -134,13 +123,16 @@ async fn init_tasks( sync_state.clone(), config.remote.l2_erc20_bridge_addr, stop_receiver.clone(), - config.optional.experimental_multivm_support, + config.remote.l2_chain_id, ) .await; let singleton_pool_builder = ConnectionPool::singleton(DbVariant::Master); let fetcher = MainNodeFetcher::new( - singleton_pool_builder.build().await, + singleton_pool_builder + .build() + .await + .context("failed to build a connection pool for MainNodeFetcher")?, &main_node_url, action_queue.clone(), sync_state.clone(), @@ -163,24 +155,41 @@ async fn init_tasks( &config .required .eth_client_url() - .expect("L1 client URL is incorrect"), - 10, - singleton_pool_builder.build().await, + .context("L1 client URL is incorrect")?, + 10, // TODO (BFT-97): Make it a part of a proper EN config + singleton_pool_builder + .build() + .await + .context("failed to build connection pool for ConsistencyChecker")?, ); - let batch_status_updater = - BatchStatusUpdater::new(&main_node_url, singleton_pool_builder.build().await).await; + let batch_status_updater = BatchStatusUpdater::new( + &main_node_url, + singleton_pool_builder + .build() + .await + .context("failed to build a connection pool for BatchStatusUpdater")?, + ) + .await; // Run the components. let tree_stop_receiver = stop_receiver.clone(); - let tree_pool = singleton_pool_builder.build().await; - let prover_tree_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; + let tree_pool = singleton_pool_builder + .build() + .await + .context("failed to build a tree_pool")?; + // todo: PLA-335 + let prover_tree_pool = ConnectionPool::singleton(DbVariant::Prover) + .build() + .await + .context("failed to build a prover_tree_pool")?; let tree_handle = task::spawn(metadata_calculator.run(tree_pool, prover_tree_pool, tree_stop_receiver)); let consistency_checker_handle = if !config.optional.experimental_multivm_support { Some(tokio::spawn(consistency_checker.run(stop_receiver.clone()))) } else { + // TODO (BFT-264): Current behavior of consistency checker makes development of MultiVM harder. 
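+        // As a result, the checker is not spawned while experimental MultiVM support is enabled.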
None }; @@ -219,7 +228,7 @@ async fn init_tasks( .build( gas_adjuster, Arc::new(vm_concurrency_limiter), - ApiContracts::load_from_disk(), + ApiContracts::load_from_disk(), // TODO (BFT-138): Allow to dynamically reload API contracts storage_caches, ) .await; @@ -262,8 +271,8 @@ async fn init_tasks( healthchecks, ); if let Some(port) = config.optional.prometheus_port { - let prometheus_task = run_prometheus_exporter(port, None); - task_handles.push(prometheus_task); + let prometheus_task = PrometheusExporterConfig::pull(port).run(stop_receiver.clone()); + task_handles.push(tokio::spawn(prometheus_task)); } task_handles.extend(http_api_handle); @@ -279,7 +288,7 @@ async fn init_tasks( task_handles.push(consistency_checker); } - (task_handles, stop_sender, healthcheck_handle) + Ok((task_handles, stop_sender, healthcheck_handle)) } async fn shutdown_components( @@ -295,27 +304,84 @@ async fn shutdown_components( healthcheck_handle.stop().await; } +#[derive(Debug, Parser)] +#[structopt(author = "Matter Labs", version)] +struct Cli { + #[arg(long)] + revert_pending_l1_batch: bool, +} + #[tokio::main] async fn main() -> anyhow::Result<()> { // Initial setup. + let opt = Cli::parse(); + + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let log_format = vlog::log_format_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let sentry_url = vlog::sentry_url_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let environment = vlog::environment_from_env(); + + let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + if let Some(sentry_url) = &sentry_url { + builder = builder + .with_sentry_url(sentry_url) + .expect("Invalid Sentry URL") + .with_sentry_environment(environment); + } + let _guard = builder.build(); + + // Report whether sentry is running after the logging subsystem was initialized. 
+ if let Some(sentry_url) = sentry_url { + tracing::info!("Sentry configured with URL: {sentry_url}"); + } else { + tracing::info!("No sentry URL was provided"); + } - vlog::init(); - let _sentry_guard = vlog::init_sentry(); let config = ExternalNodeConfig::collect() .await - .expect("Failed to load external node config"); + .context("Failed to load external node config")?; let main_node_url = config .required .main_node_url() - .expect("Main node URL is incorrect"); + .context("Main node URL is incorrect")?; + + let connection_pool = ConnectionPool::builder(DbVariant::Master) + .build() + .await + .context("failed to build a connection_pool")?; + + if opt.revert_pending_l1_batch { + tracing::info!("Rolling pending L1 batch back.."); + let reverter = BlockReverter::new( + config.required.state_cache_path, + config.required.merkle_tree_path, + None, + connection_pool.clone(), + L1ExecutedBatchesRevert::Allowed, + ); + + let mut connection = connection_pool.access_storage().await; + let sealed_l1_batch_number = connection.blocks_dal().get_sealed_l1_batch_number().await; + drop(connection); + + tracing::info!("Rolling back to l1 batch number {sealed_l1_batch_number}"); + reverter + .rollback_db(sealed_l1_batch_number, BlockReverterFlags::all()) + .await; + tracing::info!( + "Rollback successfully completed, the node has to restart to continue working" + ); + return Ok(()); + } - let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; let sigint_receiver = setup_sigint_handler(); - vlog::warn!("The external node is in the alpha phase, and should be used with caution."); + tracing::warn!("The external node is in the alpha phase, and should be used with caution."); - vlog::info!("Started the external node"); - vlog::info!("Main node URL is: {}", main_node_url); + tracing::info!("Started the external node"); + tracing::info!("Main node URL is: {}", main_node_url); // Make sure that genesis is performed. perform_genesis_if_needed( @@ -327,7 +393,9 @@ async fn main() -> anyhow::Result<()> { .context("Performing genesis failed")?; let (task_handles, stop_sender, health_check_handle) = - init_tasks(config.clone(), connection_pool.clone()).await; + init_tasks(config.clone(), connection_pool.clone()) + .await + .context("init_tasks")?; let reorg_detector = ReorgDetector::new(&main_node_url, connection_pool.clone()); let reorg_detector_handle = tokio::spawn(reorg_detector.run()); @@ -338,11 +406,11 @@ async fn main() -> anyhow::Result<()> { tokio::select! 
{ _ = wait_for_tasks(task_handles, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = sigint_receiver => { - vlog::info!("Stop signal received, shutting down"); + tracing::info!("Stop signal received, shutting down"); }, last_correct_batch = reorg_detector_handle => { if let Ok(last_correct_batch) = last_correct_batch { - vlog::info!("Performing rollback to block {}", last_correct_batch); + tracing::info!("Performing rollback to block {}", last_correct_batch); shutdown_components(stop_sender, health_check_handle).await; let reverter = BlockReverter::new( config.required.state_cache_path, @@ -354,10 +422,10 @@ async fn main() -> anyhow::Result<()> { reverter .rollback_db(last_correct_batch, BlockReverterFlags::all()) .await; - vlog::info!("Rollback successfully completed, the node has to restart to continue working"); + tracing::info!("Rollback successfully completed, the node has to restart to continue working"); return Ok(()); } else { - vlog::error!("Reorg detector actor failed"); + tracing::error!("Reorg detector actor failed"); } } } diff --git a/core/bin/storage_logs_dedup_migration/Cargo.toml b/core/bin/storage_logs_dedup_migration/Cargo.toml new file mode 100644 index 000000000000..44e8057aa93b --- /dev/null +++ b/core/bin/storage_logs_dedup_migration/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "storage_logs_dedup_migration" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "storage_logs_dedup_migration" +path = "src/main.rs" + +[[bin]] +name = "storage_logs_dedup_migration_consistency_checker" +path = "src/consistency.rs" + +[dependencies] +tokio = { version = "1" } +zksync_dal = { path = "../../lib/dal" } +zksync_types = { path = "../../lib/types" } +clap = { version = "4.2.4", features = ["derive"] } diff --git a/core/bin/storage_logs_dedup_migration/src/consistency.rs b/core/bin/storage_logs_dedup_migration/src/consistency.rs new file mode 100644 index 000000000000..bf96232a875d --- /dev/null +++ b/core/bin/storage_logs_dedup_migration/src/consistency.rs @@ -0,0 +1,65 @@ +use clap::Parser; + +use zksync_dal::connection::DbVariant; +use zksync_dal::ConnectionPool; +use zksync_types::MiniblockNumber; + +const MIGRATED_TABLE: &str = "storage_logs"; +const NOT_MIGRATED_TABLE: &str = "storage_logs_backup"; + +#[derive(Debug, Parser)] +#[command( + author = "Matter Labs", + about = "Consistency checker for the migration" +)] +struct Cli { + /// Miniblock number to start check from. + #[arg(long)] + from_miniblock: u32, + /// Miniblock number to check up to. + #[arg(long)] + to_miniblock: u32, +} + +#[tokio::main] +async fn main() { + let opt = Cli::parse(); + let pool = ConnectionPool::singleton(DbVariant::Replica) + .build() + .await + .unwrap(); + let mut connection = pool.access_storage().await; + + println!( + "Consistency check started for miniblock range {}..={}", + opt.from_miniblock, opt.to_miniblock + ); + + for miniblock_number in opt.from_miniblock..=opt.to_miniblock { + let miniblock_number = MiniblockNumber(miniblock_number); + // Load all storage logs of miniblock. 
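+        // Each key touched in the pre-migration (backup) table must resolve to the same effective value in the migrated table: deduplication may drop rows, but must never change state.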
+        let storage_logs = connection + .storage_logs_dal() + .get_miniblock_storage_logs_from_table(miniblock_number, NOT_MIGRATED_TABLE) + .await; + + for (hashed_key, _, _) in storage_logs { + let value_before_migration = connection + .storage_logs_dal() + .get_storage_value_from_table(hashed_key, miniblock_number, NOT_MIGRATED_TABLE) + .await; + let value_after_migration = connection + .storage_logs_dal() + .get_storage_value_from_table(hashed_key, miniblock_number, MIGRATED_TABLE) + .await; + assert_eq!( + value_before_migration, value_after_migration, + "Found divergence for hashed_key = {hashed_key:?}, miniblock {miniblock_number}" + ); + } + + println!("Processed miniblock {miniblock_number}"); + } + + println!("Finished"); +} diff --git a/core/bin/storage_logs_dedup_migration/src/main.rs b/core/bin/storage_logs_dedup_migration/src/main.rs new file mode 100644 index 000000000000..d58ef99bdc6b --- /dev/null +++ b/core/bin/storage_logs_dedup_migration/src/main.rs @@ -0,0 +1,165 @@ +use std::collections::hash_map::{Entry, HashMap}; + +use clap::Parser; + +use zksync_dal::connection::DbVariant; +use zksync_dal::ConnectionPool; +use zksync_types::{MiniblockNumber, H256}; + +/// When the threshold is reached, the migration blocks on vacuuming. +const UNVACUUMED_ROWS_THRESHOLD: usize = 2_000_000; + +#[derive(Debug, Parser)] +#[command( + author = "Matter Labs", + about = "Migration that deduplicates rows in storage_logs DB table" +)] +struct Cli { + /// Miniblock number to start migration from. + #[arg(long)] + start_from_miniblock: u32, +} + +/// Blockchain state cache. +struct StateCache { + /// (hashed_key => value) mapping. + pub storage: HashMap<H256, H256>, + /// Miniblock number the cache is valid for. + pub miniblock: Option<MiniblockNumber>, + /// Flag indicating if the state is initially empty. + pub is_state_initially_empty: bool, +} + +impl StateCache { + /// Loads value from state if present. + pub fn get_value(&mut self, hashed_key: H256) -> Option<H256> { + if let Entry::Vacant(e) = self.storage.entry(hashed_key) { + if self.is_state_initially_empty { + e.insert(H256::zero()); + } + } + + self.storage.get(&hashed_key).copied() + } } + +#[tokio::main] +async fn main() { + let opt = Cli::parse(); + let pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .unwrap(); + let mut connection = pool.access_storage().await; + + let sealed_miniblock = connection.blocks_dal().get_sealed_miniblock_number().await; + println!( + "Migration started for miniblock range {}..={}", + opt.start_from_miniblock, sealed_miniblock + ); + + let (previous_miniblock, is_state_initially_empty) = if opt.start_from_miniblock == 0 { + (None, true) + } else { + (Some((opt.start_from_miniblock - 1).into()), false) + }; + + let mut state_cache = StateCache { + storage: HashMap::new(), + miniblock: previous_miniblock, + is_state_initially_empty, + }; + + let mut number_of_unvacuum_rows = 0; + + for miniblock_number in opt.start_from_miniblock..=sealed_miniblock.0 { + let miniblock_number = MiniblockNumber(miniblock_number); + + // Load all storage logs of miniblock. + let storage_logs = connection + .storage_logs_dal() + .get_miniblock_storage_logs(miniblock_number) + .await; + let initial_storage_logs_count = storage_logs.len(); + + // Load previous values from memory. + let prev_values: HashMap<_, _> = storage_logs + .iter() + .map(|(hashed_key, _, _)| (*hashed_key, state_cache.get_value(*hashed_key))) + .collect(); + + // Load missing previous values from database. 
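+        // Keys can be missing from the cache only when the migration starts from a non-zero miniblock; their values are read at `miniblock_number - 1`, defaulting to zero for never-written keys.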
+ let missing_keys: Vec<_> = prev_values + .iter() + .filter_map(|(key, value)| (value.is_none()).then_some(*key)) + .collect(); + + let in_memory_prev_values_iter = prev_values.into_iter().filter_map(|(k, v)| Some((k, v?))); + let prev_values: HashMap<_, _> = if miniblock_number.0 == 0 || missing_keys.is_empty() { + assert!(missing_keys.is_empty()); + in_memory_prev_values_iter.collect() + } else { + let values_for_missing_keys: HashMap<_, _> = connection + .storage_logs_dal() + .get_storage_values(&missing_keys, miniblock_number - 1) + .await; + + in_memory_prev_values_iter + .chain( + values_for_missing_keys + .into_iter() + .map(|(k, v)| (k, v.unwrap_or_else(H256::zero))), + ) + .collect() + }; + + // Effective state for keys that were touched in the current miniblock. + let current_values: HashMap<_, _> = storage_logs + .into_iter() + .map(|(hashed_key, value, operation_number)| (hashed_key, (value, operation_number))) + .collect(); + + // Collect effective storage logs of the miniblock and their operation numbers. + let (effective_logs, op_numbers_to_retain): (Vec<_>, Vec<_>) = current_values + .into_iter() + .filter_map(|(hashed_key, (value, operation_number))| { + let prev_value = prev_values[&hashed_key]; + (value != prev_value).then_some(((hashed_key, value), operation_number as i32)) + }) + .unzip(); + + // Remove others, i.e. non-effective logs from DB. + connection + .storage_logs_dal() + .retain_storage_logs(miniblock_number, &op_numbers_to_retain) + .await; + number_of_unvacuum_rows += initial_storage_logs_count - op_numbers_to_retain.len(); + + // Update state cache. + for (key, value) in effective_logs { + state_cache.storage.insert(key, value); + } + state_cache.miniblock = Some(miniblock_number); + + if miniblock_number.0 < 100 || miniblock_number.0 % 100 == 0 { + println!("Deduplicated logs for miniblock {miniblock_number}, number of unvacuumed rows {number_of_unvacuum_rows}"); + } + + if number_of_unvacuum_rows > UNVACUUMED_ROWS_THRESHOLD { + let started_at = std::time::Instant::now(); + println!("Starting vacuuming"); + connection.storage_logs_dal().vacuum_storage_logs().await; + number_of_unvacuum_rows = 0; + println!("Vacuum finished in {:?}", started_at.elapsed()); + } + } + + if number_of_unvacuum_rows > 0 { + let started_at = std::time::Instant::now(); + println!("Starting vacuuming"); + connection.storage_logs_dal().vacuum_storage_logs().await; + println!("Vacuum finished in {:?}", started_at.elapsed()); + } + + println!("Finished"); +} diff --git a/core/bin/system-constants-generator/Cargo.toml b/core/bin/system-constants-generator/Cargo.toml index 4d4fea76b185..561ac5167385 100644 --- a/core/bin/system-constants-generator/Cargo.toml +++ b/core/bin/system-constants-generator/Cargo.toml @@ -11,11 +11,11 @@ description = "Tool for generating JSON files with the system constants for L1/L publish = false # We don't want to publish our binaries. 
[dependencies] -zksync_state = { path = "../../lib/state", version = "1.0" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -zksync_contracts = {path = "../../lib/contracts", version = "1.0" } -vm = {path = "../../lib/vm", version="0.1.0" } +zksync_state = { path = "../../lib/state" } +zksync_types = { path = "../../lib/types" } +zksync_utils = { path = "../../lib/utils" } +zksync_contracts = { path = "../../lib/contracts" } +vm = { path = "../../lib/vm" } codegen = "0.2.0" diff --git a/core/bin/system-constants-generator/src/intrinsic_costs.rs b/core/bin/system-constants-generator/src/intrinsic_costs.rs index ef8db44829f4..c663939db6ee 100644 --- a/core/bin/system-constants-generator/src/intrinsic_costs.rs +++ b/core/bin/system-constants-generator/src/intrinsic_costs.rs @@ -21,6 +21,7 @@ pub(crate) struct VmSpentResourcesResult { // The total amount of gas the users have paid for. pub(crate) total_gas_paid: u32, // The total amount of gas the users have paid for public data. + // TODO (SMA-1698): make it an option, since sometimes its calculation is skipped. pub(crate) total_pubdata_paid: u32, } diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index 741d8e0500d6..d101647134f8 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -129,8 +129,8 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.raw( vec![ - "H", - "A", + "// TODO (SMA-1699): Use this method to ensure that the transactions provide enough", + "// intrinsic gas on the API level.", ] .join("\n"), ); diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 1f1d5e4abeb9..3af5df328f42 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -266,6 +266,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( &GAS_TEST_SYSTEM_CONTRACTS, TxExecutionMode::VerifyExecute, ); + vm.start_next_l2_block(vm.get_current_l2_block_info().dummy_next_block_info()); let mut total_gas_refunded = 0; for tx in txs { @@ -305,6 +306,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( result.full_result.contracts_used, result.full_result.cycles_used, result.full_result.computational_gas_used, + result.full_result.total_log_queries, ); VmSpentResourcesResult { diff --git a/core/bin/verification_key_generator_and_server/Cargo.toml b/core/bin/verification_key_generator_and_server/Cargo.toml index 837e52898765..c6ade8c3c05c 100644 --- a/core/bin/verification_key_generator_and_server/Cargo.toml +++ b/core/bin/verification_key_generator_and_server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_verification_key_generator_and_server" -version = "1.0.0" +version = "0.1.0" edition = "2018" [lib] @@ -20,10 +20,10 @@ name = "zksync_commitment_generator" path = "src/commitment_generator.rs" [dependencies] -zksync_types = {path = "../../lib/types", version = "1.0" } -zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -circuit_testing = {git = "https://github.com/matter-labs/era-circuit_testing.git", branch = "main"} +zksync_types = { path = "../../lib/types" } +zksync_prover_utils = { path = "../../lib/prover_utils" } +vlog = { path = "../../lib/vlog" } +circuit_testing = { git = 
"https://github.com/matter-labs/era-circuit_testing.git", branch = "main" } itertools = "0.10.5" bincode = "1.3.3" @@ -32,3 +32,4 @@ hex = "0.4.3" structopt = "0.3.26" ff = { package = "ff_ce", version = "0.14.1" } once_cell = "1.8.0" +tracing = "0.1" diff --git a/core/bin/verification_key_generator_and_server/data/verification_3_key.json b/core/bin/verification_key_generator_and_server/data/verification_3_key.json index d6435ac58dcc..efc3726eccd1 100644 --- a/core/bin/verification_key_generator_and_server/data/verification_3_key.json +++ b/core/bin/verification_key_generator_and_server/data/verification_3_key.json @@ -6,121 +6,121 @@ "gate_setup_commitments": [ { "x": [ - 16132018988898810280, - 14629326668513402207, - 13927012963228438244, - 438836257494388653 + 974750104498669965, + 8834694375170731028, + 17769568165179068263, + 1849964290686413257 ], "y": [ - 9208149293484976939, - 6928926474381817269, - 9049839661709678601, - 3159086914561291422 + 2843958502709968021, + 14935491193053445350, + 17474331455917570677, + 3480256926594645294 ], "infinity": false }, { "x": [ - 8167991757189792621, - 12144200201207014477, - 11734943669898586373, - 2193141783069775476 + 12247266302470255326, + 5464334033464606744, + 14546720066962635103, + 3390803970213094244 ], "y": [ - 17360142870886900923, - 13115961790852995117, - 18410766760283752426, - 58598073722965045 + 1712883459777313087, + 8894684513803091578, + 7336029034040207862, + 1084942733964754038 ], "infinity": false }, { "x": [ - 6146007756603285924, - 6323811950493532548, - 13106922048360852624, - 466032134849380491 + 11977576082511042092, + 13911318721427630536, + 319094179978428102, + 953394664847088490 ], "y": [ - 18129312257576062028, - 1862682333985860999, - 11375866511232479537, - 1993295577101228431 + 5661602966428088380, + 18066888770140331931, + 10148625466830766086, + 532999801462127665 ], "infinity": false }, { "x": [ - 2809427848829662407, - 12284258556184215755, - 7636306187033076241, - 2114748180158946072 + 10638316621700142822, + 6209825954391834011, + 6018402549491433521, + 2545954919587131385 ], "y": [ - 14938290724462753010, - 4469670089738748357, - 4333038821107617112, - 3325252483617185630 + 3871396302214628234, + 10421121582832311901, + 3487262368594849688, + 47097530491220969 ], "infinity": false }, { "x": [ - 2811571661472266293, - 10805076128876270563, - 13466352093104839476, - 2679418577316861424 + 5177078736350587057, + 913561536392131154, + 5845225668116211782, + 1148177573394811202 ], "y": [ - 6289932497598671988, - 10287193347450766117, - 1002213271037575380, - 2919063136646142593 + 8211065483139055749, + 11150796128594731149, + 12060516803886544192, + 1369115203017663219 ], "infinity": false }, { "x": [ - 18040905380309423259, - 2992050369681309630, - 7602002287396364748, - 610279260467961564 + 13164869081104983842, + 8055457852373227775, + 14586708642322040767, + 1635508642571745116 ], "y": [ - 13064134520386336340, - 16129927882519803745, - 2367043478173028808, - 756367716463615162 + 13200963466266892674, + 5743120645853669652, + 13845956436885115425, + 190245686570654182 ], "infinity": false }, { "x": [ - 11205248788812451673, - 12805888223872356565, - 3192506617534475900, - 1788055857068087500 + 14509622964666644543, + 14326815147327339718, + 14403865749203816615, + 3250250446651605086 ], "y": [ - 14249524255052314472, - 2804915545701549755, - 8453552885321586295, - 1517275418947033059 + 16982880826411734238, + 7223038929743846372, + 13243677057981888895, + 3343376109946605946 ], "infinity": false }, { 
"x": [ - 3370213179722093102, - 15232829601741289326, - 15264565963863545427, - 3399686965437331565 + 2186705028467599783, + 10754157155083578321, + 9835223072941921904, + 622934131449235283 ], "y": [ - 695972907603137570, - 1065210392506346562, - 13377574163460762697, - 1438903077274895969 + 18146384691082289702, + 3710418457832183420, + 9065618198278602094, + 1385809660894704773 ], "infinity": false } @@ -128,31 +128,31 @@ "gate_selectors_commitments": [ { "x": [ - 2916037360975110368, - 2115449174392144212, - 14608640754181148556, - 971169600851407725 + 17931943108498820694, + 4676695383650659094, + 9553383984112211657, + 2582659305204352628 ], "y": [ - 8394386574107401239, - 11833730770085674615, - 2274226308564527013, - 757668483662158276 + 12954711565738560879, + 8077826911980788091, + 11395265968148743331, + 2855607571527172113 ], "infinity": false }, { "x": [ - 15790049926918974381, - 1498916798022404031, - 7512683194829329777, - 130629875891522412 + 1579731699170899555, + 2930979681521170129, + 14423227389748779725, + 3843483067412713 ], "y": [ - 10570165756172650947, - 16024260722575267127, - 10403393574807352915, - 2193441270275759098 + 12757858203360676100, + 11658617912640524507, + 18404463112235833117, + 216398038700598122 ], "infinity": false } @@ -160,78 +160,78 @@ "permutation_commitments": [ { "x": [ - 16937140281571163722, - 8528685593141239391, - 14604264781363495282, - 137274269481496519 + 17803109611249396349, + 11283859494780689211, + 13999735262776985506, + 1616317395767274315 ], "y": [ - 12832201597709954736, - 14731869942190515158, - 6098275778634872340, - 1894980855192645736 + 4702080082438660327, + 10318021266807502248, + 1343468927015043909, + 1458947702127238817 ], "infinity": false }, { "x": [ - 9506243675355227355, - 4921543690455935123, - 4864521994209504368, - 1194838968611913113 + 13991248091180946539, + 9572452770464844385, + 7281256466642465445, + 1589821161594539260 ], "y": [ - 3514343005912152815, - 4590983463867380289, - 4810604720730504186, - 2370963330363647136 + 16330872592308522669, + 11643961078499332590, + 7621286777424912214, + 1961788650881680195 ], "infinity": false }, { "x": [ - 17229604967143530892, - 10436078466088746474, - 8131435850212543426, - 62279533258920234 + 14854997120241085994, + 893859077870132655, + 10853933192917459827, + 2671373989840251193 ], "y": [ - 13233331203150147346, - 1683012122586045301, - 12023735178019542936, - 209604889173254535 + 11492939649862087988, + 1925620351626108277, + 12007636802682139817, + 1315346956977275889 ], "infinity": false }, { "x": [ - 2317615578050559984, - 3409863650741942555, - 2800913349454859982, - 1145393423472998888 + 13343929807426311972, + 3234215523073799496, + 4658804614957804350, + 123243726695066707 ], "y": [ - 3313805684145217681, - 8698235642305641581, - 9555080423733977624, - 1868569776379940006 + 14958243475655956241, + 4034118281425140294, + 1019154098165161379, + 2657524750158613958 ], "infinity": false } ], - "total_lookup_entries_length": 15224202, + "total_lookup_entries_length": 15208907, "lookup_selector_commitment": { "x": [ - 8259805827727145280, - 16824074282046227807, - 3121477555564412954, - 1865943569871175431 + 3869759959209659371, + 17545310949245876386, + 6597968549104995840, + 1547642766729861753 ], "y": [ - 16644549437179353127, - 14517598437143192812, - 10007139167811704538, - 1155056757464351841 + 5629222579571396955, + 16315207580711001852, + 15947168783307514478, + 2534006098464270073 ], "infinity": false }, @@ -299,16 +299,16 @@ ], 
"lookup_table_type_commitment": { "x": [ - 15847017871013531611, - 15599972677346614519, - 7829590182636204190, - 1175429517043722092 + 6380759427317126685, + 6672737265924091686, + 14552369645196037262, + 1668823783737500912 ], "y": [ - 3566505229345713693, - 2712054860970341853, - 7992126959188622741, - 416242544149800237 + 4951884449279236371, + 16324193898368483526, + 10792452284404778772, + 929770155761471462 ], "infinity": false }, diff --git a/core/bin/verification_key_generator_and_server/src/commitment_generator.rs b/core/bin/verification_key_generator_and_server/src/commitment_generator.rs index e9a94a7566f2..9b8bd76a6268 100644 --- a/core/bin/verification_key_generator_and_server/src/commitment_generator.rs +++ b/core/bin/verification_key_generator_and_server/src/commitment_generator.rs @@ -4,7 +4,7 @@ use zksync_prover_utils::vk_commitment_helper::{ use zksync_verification_key_server::generate_commitments; fn main() { - vlog::info!("Starting commitment generation!"); + tracing::info!("Starting commitment generation!"); read_and_update_contract_toml(); } @@ -21,7 +21,7 @@ fn read_and_update_contract_toml() { get_toml_formatted_value(leaf_aggregation_commitment_hex); contract_doc["contracts"]["RECURSION_NODE_LEVEL_VK_HASH"] = get_toml_formatted_value(node_aggregation_commitment_hex); - vlog::info!("Updated toml content: {:?}", contract_doc.to_string()); + tracing::info!("Updated toml content: {:?}", contract_doc.to_string()); write_contract_toml(contract_doc); } diff --git a/core/bin/verification_key_generator_and_server/src/lib.rs b/core/bin/verification_key_generator_and_server/src/lib.rs index 5ba186a422aa..2b05363595b6 100644 --- a/core/bin/verification_key_generator_and_server/src/lib.rs +++ b/core/bin/verification_key_generator_and_server/src/lib.rs @@ -44,7 +44,7 @@ pub fn get_vk_for_circuit_type( circuit_type: u8, ) -> VerificationKey>> { let filepath = get_file_path(circuit_type); - vlog::info!("Fetching verification key from path: {}", filepath); + tracing::info!("Fetching verification key from path: {}", filepath); let text = std::fs::read_to_string(&filepath) .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); serde_json::from_str::>>>( @@ -63,7 +63,7 @@ pub fn save_vk_for_circuit_type( vk: VerificationKey>>, ) { let filepath = get_file_path(circuit_type); - vlog::info!("saving verification key to: {}", filepath); + tracing::info!("saving verification key to: {}", filepath); std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); } @@ -150,15 +150,15 @@ pub fn generate_commitments() -> (String, String, String) { let basic_circuit_commitment_hex = format!("0x{}", to_hex(&basic_circuit_commitment)); let leaf_aggregation_commitment_hex = format!("0x{}", to_hex(&leaf_aggregation_vk_commitment)); let node_aggregation_commitment_hex = format!("0x{}", to_hex(&node_aggregation_vk_commitment)); - vlog::info!( + tracing::info!( "basic circuit commitment {:?}", basic_circuit_commitment_hex ); - vlog::info!( + tracing::info!( "leaf aggregation commitment {:?}", leaf_aggregation_commitment_hex ); - vlog::info!( + tracing::info!( "node aggregation commitment {:?}", node_aggregation_commitment_hex ); diff --git a/core/bin/verification_key_generator_and_server/src/main.rs b/core/bin/verification_key_generator_and_server/src/main.rs index e556bfd24f59..30ffb0574d4d 100644 --- a/core/bin/verification_key_generator_and_server/src/main.rs +++ b/core/bin/verification_key_generator_and_server/src/main.rs @@ -15,7 +15,7 @@ fn main() 
{ } else { (3..17).collect() }; - vlog::info!("Starting verification key generation!"); + tracing::info!("Starting verification key generation!"); get_circuits_for_vk() .into_iter() .filter(|c| circuit_types.contains(&c.numeric_circuit_type())) @@ -23,7 +23,7 @@ fn main() { } fn get_and_ensure_valid_circuit_type(circuit_type: String) -> u8 { - vlog::info!("Received circuit_type: {:?}", circuit_type); + tracing::info!("Received circuit_type: {:?}", circuit_type); circuit_type .parse::() .expect("Please specify a circuit type in range [1, 17]") @@ -37,7 +37,7 @@ fn generate_verification_key(circuit: ZkSyncCircuit(circuit.clone(), 26) .unwrap(); save_vk_for_circuit_type(circuit.numeric_circuit_type(), res); - vlog::info!( + tracing::info!( "Finished VK generation for circuit {:?} (id {:?})", circuit.short_description(), circuit.numeric_circuit_type() diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index 226a77035cc6..11db825e446e 100644 --- a/core/bin/zksync_core/Cargo.toml +++ b/core/bin/zksync_core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_core" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -11,37 +11,38 @@ categories = ["cryptography"] publish = false # We don't want to publish our binaries. [dependencies] -zksync_state = { path = "../../lib/state", version = "1.0" } -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_dal = { path = "../../lib/dal", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -zksync_contracts = { path = "../../lib/contracts", version = "1.0" } -zksync_eth_client = { path = "../../lib/eth_client", version = "1.0" } -zksync_eth_signer = { path = "../../lib/eth_signer", version = "1.0" } -zksync_mempool = { path = "../../lib/mempool", version = "1.0" } -zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } -zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } -zksync_circuit_breaker = { path = "../../lib/circuit_breaker", version = "1.0" } -vm = { path = "../../lib/vm", version = "0.1.0" } -zksync_storage = { path = "../../lib/storage", version = "1.0" } -zksync_merkle_tree = { path = "../../lib/merkle_tree", version = "1.0" } -zksync_mini_merkle_tree = { path = "../../lib/mini_merkle_tree", version = "1.0" } -zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } -prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-features = false, features = [ - "server", "client" +zksync_state = { path = "../../lib/state" } +zksync_types = { path = "../../lib/types" } +zksync_dal = { path = "../../lib/dal" } +zksync_config = { path = "../../lib/config" } +zksync_utils = { path = "../../lib/utils" } +zksync_contracts = { path = "../../lib/contracts" } +zksync_eth_client = { path = "../../lib/eth_client" } +zksync_eth_signer = { path = "../../lib/eth_signer" } +zksync_mempool = { path = "../../lib/mempool" } +zksync_prover_utils = { path = "../../lib/prover_utils" } +zksync_queued_job_processor = { path = "../../lib/queued_job_processor" } +zksync_circuit_breaker = { path = "../../lib/circuit_breaker" } +vm = { path = "../../lib/vm" } +zksync_storage = { path = "../../lib/storage" } +zksync_merkle_tree = { path = 
"../../lib/merkle_tree" } +zksync_mini_merkle_tree = { path = "../../lib/mini_merkle_tree" } +zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server" } +prometheus_exporter = { path = "../../lib/prometheus_exporter" } +zksync_web3_decl = { path = "../../lib/web3_decl", default-features = false, features = [ + "server", + "client", ] } -zksync_object_store = { path = "../../lib/object_store", version = "1.0" } -zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_object_store = { path = "../../lib/object_store" } +zksync_health_check = { path = "../../lib/health_check" } +vlog = { path = "../../lib/vlog" } -multivm = { path = "../../lib/multivm", version = "0.1.0" } +multivm = { path = "../../lib/multivm" } clap = { version = "4.2.4", features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -metrics = "0.20" +metrics = "0.21" itertools = "0.10.3" ctrlc = { version = "3.1", features = ["termination"] } rand = "0.8" @@ -68,7 +69,13 @@ hex = "0.4" governor = "0.4.2" tower-http = { version = "0.4.1", features = ["full"] } tower = { version = "0.4.13", features = ["full"] } -axum = { version = "0.6.19", default-features = false, features = ["http1", "json", "tokio"] } +axum = { version = "0.6.19", default-features = false, features = [ + "http1", + "json", + "tokio", +] } +once_cell = "1.7" + actix-rt = "2.2.0" actix-cors = "0.6.0-beta.2" @@ -80,8 +87,9 @@ tracing = "0.1.26" tikv-jemallocator = "0.5" [dev-dependencies] -db_test_macro = { path = "../../lib/db_test_macro", version = "0.1.0" } +db_test_macro = { path = "../../lib/db_test_macro" } assert_matches = "1.5" -once_cell = "1.7" +zksync_test_account = { path = "../../lib/test_account" } + tempfile = "3.0.2" diff --git a/core/bin/zksync_core/src/api_server/contract_verification/mod.rs b/core/bin/zksync_core/src/api_server/contract_verification/mod.rs index e49d56a695e2..f7d7be67a84c 100644 --- a/core/bin/zksync_core/src/api_server/contract_verification/mod.rs +++ b/core/bin/zksync_core/src/api_server/contract_verification/mod.rs @@ -51,7 +51,7 @@ pub fn start_server_thread_detached( replica_connection_pool: ConnectionPool, api_config: ContractVerificationApiConfig, mut stop_receiver: watch::Receiver, -) -> JoinHandle<()> { +) -> JoinHandle> { let (handler, panic_sender) = spawn_panic_handler(); std::thread::Builder::new() @@ -69,7 +69,7 @@ pub fn start_server_thread_detached( actix_rt::spawn(async move { if stop_receiver.changed().await.is_ok() { close_handle.stop(true).await; - vlog::info!( + tracing::info!( "Stop signal received, contract verification API is shutting down" ); } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs index b6a299eb46ca..f9c47c82ea58 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -6,26 +6,23 @@ //! //! This module is intended to be blocking. 
-use std::{ - collections::HashMap, - time::{Duration, Instant}, -}; +use multivm::{VmInstance, VmInstanceData}; +use std::time::{Duration, Instant}; +use vm::{constants::BLOCK_GAS_LIMIT, HistoryDisabled, L1BatchEnv, L2BlockEnv, SystemEnv}; -use vm::{ - vm_with_bootloader::{ - derive_base_fee_and_gas_per_pubdata, init_vm, BlockContext, BlockContextMode, - DerivedBlockContext, - }, - zk_evm::block_properties::BlockProperties, - HistoryDisabled, VmInstance, +use zksync_config::constants::{ + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; -use zksync_config::constants::ZKPORTER_IS_AVAILABLE; use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; use zksync_state::{PostgresStorage, ReadStorage, StorageView, WriteStorage}; use zksync_types::{ - api, get_nonce_key, + api, + block::{legacy_miniblock_hash, pack_block_info, unpack_block_info}, + get_nonce_key, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - L1BatchNumber, MiniblockNumber, Nonce, StorageKey, Transaction, H256, U256, + AccountTreeId, L1BatchNumber, MiniblockNumber, Nonce, ProtocolVersionId, StorageKey, + Transaction, H256, U256, }; use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; @@ -39,9 +36,8 @@ pub(super) fn apply_vm_in_sandbox( connection_pool: &ConnectionPool, tx: Transaction, block_args: BlockArgs, - storage_read_cache: HashMap<StorageKey, H256>, - apply: impl FnOnce(&mut Box<VmInstance<'_, HistoryDisabled>>, Transaction) -> T, -) -> (T, HashMap<StorageKey, H256>) { + apply: impl FnOnce(&mut VmInstance<'_, PostgresStorage<'_>, HistoryDisabled>, Transaction) -> T, +) -> T { let stage_started_at = Instant::now(); let span = tracing::debug_span!("initialization").entered(); @@ -50,20 +46,25 @@ pub(super) fn apply_vm_in_sandbox( let connection_acquire_time = stage_started_at.elapsed(); // We don't want to emit too many logs. if connection_acquire_time > Duration::from_millis(10) { - vlog::debug!( + tracing::debug!( "Obtained connection (took {:?})", stage_started_at.elapsed() ); } let resolve_started_at = Instant::now(); - let (state_block_number, vm_block_number) = rt_handle - .block_on(block_args.resolve_block_numbers(&mut connection)) + let ResolvedBlockInfo { + state_l2_block_number, + vm_l1_batch_number, + l1_batch_timestamp, + protocol_version, + } = rt_handle + .block_on(block_args.resolve_block_info(&mut connection)) .expect("Failed resolving block numbers"); let resolve_time = resolve_started_at.elapsed(); // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { - vlog::debug!( + tracing::debug!( "Resolved block numbers (took {:?})", resolve_started_at.elapsed() ); @@ -72,14 +73,51 @@ pub(super) fn apply_vm_in_sandbox( if block_args.resolves_to_latest_sealed_miniblock() { shared_args .caches - .schedule_values_update(state_block_number); + .schedule_values_update(state_l2_block_number); } - let block_timestamp = block_args.block_timestamp_seconds(); - let storage = PostgresStorage::new(rt_handle.clone(), connection, state_block_number, false) + let mut l2_block_info_to_reset = None; + let current_l2_block_info = + rt_handle.block_on(read_l2_block_info(&mut connection, state_l2_block_number)); + let next_l2_block_info = if block_args.is_pending_miniblock() { + L2BlockEnv { + number: current_l2_block_info.l2_block_number + 1, + timestamp: l1_batch_timestamp, + prev_block_hash: current_l2_block_info.l2_block_hash, + // For simplicity, we assume that each miniblock creates one virtual block.
+ // This may be wrong only during the transition period. + max_virtual_blocks_to_create: 1, + } + } else if current_l2_block_info.l2_block_number == 0 { + // Special case: + // - For environments where the genesis block was created before the virtual blocks upgrade, it doesn't matter what we put here. + // - Otherwise, we need to put actual values here. We cannot create the next L2 block with block_number=0 and max_virtual_blocks_to_create=0 + // because of SystemContext requirements. But, due to the internals of SystemContext, block.number will still resolve to 0. + L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), + max_virtual_blocks_to_create: 1, + } + } else { + // We need to reset L2 block info in storage to process the transaction in the current block context. + // The actual reset is done after `storage_view` is created. + let prev_l2_block_info = rt_handle.block_on(read_l2_block_info( + &mut connection, + state_l2_block_number - 1, + )); + l2_block_info_to_reset = Some(prev_l2_block_info); + L2BlockEnv { + number: current_l2_block_info.l2_block_number, + timestamp: current_l2_block_info.l2_block_timestamp, + prev_block_hash: prev_l2_block_info.l2_block_hash, + max_virtual_blocks_to_create: 1, + } + }; + + let storage = PostgresStorage::new(rt_handle.clone(), connection, state_l2_block_number, false) .with_caches(shared_args.caches); - // Moving `storage_read_cache` to `storage_view`. It will be moved back once execution is finished and `storage_view` is not needed. - let mut storage_view = StorageView::new_with_read_keys(storage, storage_read_cache); + let mut storage_view = StorageView::new(storage); let storage_view_setup_started_at = Instant::now(); if let Some(nonce) = execution_args.enforced_nonce { @@ -95,47 +133,79 @@ pub(super) fn apply_vm_in_sandbox( let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); current_balance += execution_args.added_balance; storage_view.set_value(balance_key, u256_to_h256(current_balance)); + + // Reset L2 block info. + if let Some(l2_block_info_to_reset) = l2_block_info_to_reset { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = pack_block_info( + l2_block_info_to_reset.l2_block_number as u64, + l2_block_info_to_reset.l2_block_timestamp, + ); + storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + storage_view.set_value( + l2_block_txs_rolling_hash_key, + l2_block_info_to_reset.txs_rolling_hash, + ); + } + let storage_view_setup_time = storage_view_setup_started_at.elapsed(); // We don't want to emit too many logs. if storage_view_setup_time > Duration::from_millis(10) { - vlog::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); + tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); } - let mut oracle_tools = vm::OracleTools::new(&mut storage_view, HistoryDisabled); - let block_properties = BlockProperties { - default_aa_code_hash: h256_to_u256(shared_args.base_system_contracts.default_aa.hash), - zkporter_is_available: ZKPORTER_IS_AVAILABLE, - }; let TxSharedArgs { + operator_account, l1_gas_price, fair_l2_gas_price, + base_system_contracts, + validation_computational_gas_limit, + chain_id, ..
} = shared_args; - let block_context = DerivedBlockContext { - context: BlockContext { - block_number: vm_block_number.0, - block_timestamp, - l1_gas_price, - fair_l2_gas_price, - operator_address: *shared_args.operator_account.address(), - }, - base_fee: execution_args.enforced_base_fee.unwrap_or_else(|| { - derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price).0 - }), + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: protocol_version, + base_system_smart_contracts: base_system_contracts + .get_by_protocol_version(protocol_version), + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: execution_args.execution_mode, + default_validation_computational_gas_limit: validation_computational_gas_limit, + chain_id, }; - // Since this method assumes that the block vm_block_number-1 is present in the DB, it means that its hash - // has already been stored in the VM. - let block_context_properties = BlockContextMode::OverrideCurrent(block_context); + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: vm_l1_batch_number, + timestamp: l1_batch_timestamp, + l1_gas_price, + fair_l2_gas_price, + fee_account: *operator_account.address(), + enforced_base_fee: execution_args.enforced_base_fee, + first_l2_block: next_l2_block_info, + }; - let mut vm = init_vm( - &mut oracle_tools, - block_context_properties, - &block_properties, - execution_args.execution_mode, - &shared_args.base_system_contracts, + let storage_view = storage_view.to_rc_ptr(); + let mut initial_version = VmInstanceData::new_for_specific_vm_version( + storage_view.clone(), + &system_env, + HistoryDisabled, + protocol_version.into_api_vm_version(), ); + let mut vm = Box::new(VmInstance::new( + l1_batch_env, + system_env, + &mut initial_version, + )); metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "initialization"); span.exit(); @@ -150,70 +220,140 @@ pub(super) fn apply_vm_in_sandbox( let vm_execution_took = stage_started_at.elapsed(); metrics::histogram!("api.web3.sandbox", vm_execution_took, "stage" => "execution"); - let oracles_sizes = vm_metrics::record_vm_memory_metrics(&vm); - vm_metrics::report_storage_view_metrics( - &tx_id, - oracles_sizes, - vm_execution_took, - storage_view.metrics(), - ); + let memory_metrics = vm.record_vm_memory_metrics(); + if let Some(memory_metrics) = memory_metrics { + vm_metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + storage_view.as_ref().borrow_mut().metrics(), + ); + } drop(vm_permit); // Ensure that the permit lives until this point - // Move `read_storage_keys` from `storage_view` back to cache. 
- (result, storage_view.into_read_storage_keys()) + result } -impl BlockArgs { - fn is_pending_miniblock(&self) -> bool { - matches!( - self.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ) +#[derive(Debug, Clone, Copy)] +struct StoredL2BlockInfo { + pub l2_block_number: u32, + pub l2_block_timestamp: u64, + pub l2_block_hash: H256, + pub txs_rolling_hash: H256, +} + +async fn read_l2_block_info( + connection: &mut StorageProcessor<'_>, + miniblock_number: MiniblockNumber, +) -> StoredL2BlockInfo { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = connection + .storage_web3_dal() + .get_historical_value_unchecked(&l2_block_info_key, miniblock_number) + .await + .unwrap(); + let (l2_block_number, l2_block_timestamp) = unpack_block_info(h256_to_u256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let txs_rolling_hash = connection + .storage_web3_dal() + .get_historical_value_unchecked(&l2_block_txs_rolling_hash_key, miniblock_number) + .await + .unwrap(); + + let l2_block_hash = connection + .blocks_web3_dal() + .get_miniblock_hash(miniblock_number) + .await + .unwrap() + .unwrap(); + + StoredL2BlockInfo { + l2_block_number: l2_block_number as u32, + l2_block_timestamp, + l2_block_hash, + txs_rolling_hash, } +} + +#[derive(Debug)] +struct ResolvedBlockInfo { + pub state_l2_block_number: MiniblockNumber, + pub vm_l1_batch_number: L1BatchNumber, + pub l1_batch_timestamp: u64, + pub protocol_version: ProtocolVersionId, +} - fn resolves_to_latest_sealed_miniblock(&self) -> bool { +impl BlockArgs { + pub(crate) fn is_pending_miniblock(&self) -> bool { matches!( self.block_id, - api::BlockId::Number( - api::BlockNumber::Pending | api::BlockNumber::Latest | api::BlockNumber::Committed - ) + api::BlockId::Number(api::BlockNumber::Pending) ) } - async fn resolve_block_numbers( + async fn resolve_block_info( &self, connection: &mut StorageProcessor<'_>, - ) -> Result<(MiniblockNumber, L1BatchNumber), SqlxError> { - Ok(if self.is_pending_miniblock() { - let sealed_l1_batch_number = connection - .blocks_web3_dal() - .get_sealed_l1_batch_number() - .await?; - let sealed_miniblock_number = connection - .blocks_web3_dal() - .get_sealed_miniblock_number() - .await?; - (sealed_miniblock_number, sealed_l1_batch_number + 1) - } else { - let l1_batch_number = connection - .storage_web3_dal() - .resolve_l1_batch_number_of_miniblock(self.resolved_block_number) - .await? - .expected_l1_batch(); - (self.resolved_block_number, l1_batch_number) - }) - } + ) -> Result { + let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp) = + if self.is_pending_miniblock() { + let sealed_l1_batch_number = connection + .blocks_web3_dal() + .get_sealed_l1_batch_number() + .await?; + let sealed_miniblock_header = connection + .blocks_dal() + .get_last_sealed_miniblock_header() + .await + .expect("At least one miniblock must exist"); - fn block_timestamp_seconds(&self) -> u64 { - if self.is_pending_miniblock() { - seconds_since_epoch() - } else { - self.block_timestamp_s.unwrap_or_else(|| { - panic!( - "Block timestamp is `None`, `block_id`: {:?}, `resolved_block_number`: {}", + // Timestamp of the next L1 batch must be greater than the timestamp of the last miniblock. 
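// Aside: an illustrative round trip for the `SystemContext` block-info slot that `read_l2_block_info` above decodes. This is only a sketch assuming the layout used by `pack_block_info` / `unpack_block_info` (block number in the high 128 bits, timestamp in the low 128 bits); the helper names below are hypothetical.
use zksync_types::U256;

fn pack_block_info_sketch(block_number: u64, block_timestamp: u64) -> U256 {
    // Number occupies the upper half of the 256-bit word, timestamp the lower half.
    (U256::from(block_number) << 128u32) + U256::from(block_timestamp)
}

fn unpack_block_info_sketch(packed: U256) -> (u64, u64) {
    // Any realistic block number and timestamp fit into 64 bits, so taking the
    // low word of each half is enough for this sketch.
    ((packed >> 128u32).low_u64(), packed.low_u64())
}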
+ let l1_batch_timestamp = + seconds_since_epoch().max(sealed_miniblock_header.timestamp + 1); + ( + sealed_miniblock_header.number, + sealed_l1_batch_number + 1, + l1_batch_timestamp, + ) + } else { + let l1_batch_number = connection + .storage_web3_dal() + .resolve_l1_batch_number_of_miniblock(self.resolved_block_number) + .await? + .expected_l1_batch(); + let l1_batch_timestamp = self.l1_batch_timestamp_s.unwrap_or_else(|| { + panic!( + "L1 batch timestamp is `None`, `block_id`: {:?}, `resolved_block_number`: {}", self.block_id, self.resolved_block_number.0 ); - }) - } + }); + + ( + self.resolved_block_number, + l1_batch_number, + l1_batch_timestamp, + ) + }; + + // Blocks without a specified version are considered to be of `Version9`. + // TODO: remove `unwrap_or` once a protocol version ID is assigned to each block. + let protocol_version = connection + .blocks_dal() + .get_miniblock_protocol_version_id(state_l2_block_number) + .await + .unwrap_or(ProtocolVersionId::Version9); + Ok(ResolvedBlockInfo { + state_l2_block_number, + vm_l1_batch_number, + l1_batch_timestamp, + protocol_version, + }) } } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs index 4b67d987ebcf..abc50af37a5f 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs @@ -1,6 +1,6 @@ use thiserror::Error; -use vm::TxRevertReason; +use vm::{Halt, TxRevertReason}; #[derive(Debug, Error)] pub(crate) enum SandboxExecutionError {
Reason: {0}")] - Unexecutable(String), } -impl From for SandboxExecutionError { - fn from(reason: TxRevertReason) -> Self { - match reason { - TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert( - reason.to_user_friendly_string(), - reason.encoded_data(), - ), - TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert( - reason.to_user_friendly_string(), - reason.encoded_data(), - ), - TxRevertReason::FailedToChargeFee(reason) => { - SandboxExecutionError::FailedToChargeFee(reason.to_string()) - } - TxRevertReason::FromIsNotAnAccount => SandboxExecutionError::FromIsNotAnAccount, - TxRevertReason::InnerTxError => SandboxExecutionError::InnerTxError, - TxRevertReason::Unknown(reason) => { - SandboxExecutionError::BootloaderFailure(reason.to_string()) - } - TxRevertReason::ValidationFailed(reason) => { - SandboxExecutionError::AccountValidationFailed(reason.to_string()) - } - TxRevertReason::PaymasterValidationFailed(reason) => { - SandboxExecutionError::PaymasterValidationFailed(reason.to_string()) +impl From for SandboxExecutionError { + fn from(value: Halt) -> Self { + match value { + Halt::FailedToChargeFee(reason) => Self::FailedToChargeFee(reason.to_string()), + Halt::FromIsNotAnAccount => Self::FromIsNotAnAccount, + Halt::InnerTxError => Self::InnerTxError, + Halt::Unknown(reason) => Self::BootloaderFailure(reason.to_string()), + Halt::ValidationFailed(reason) => Self::AccountValidationFailed(reason.to_string()), + Halt::PaymasterValidationFailed(reason) => { + Self::PaymasterValidationFailed(reason.to_string()) } - TxRevertReason::PrePaymasterPreparationFailed(reason) => { - SandboxExecutionError::PrePaymasterPreparationFailed(reason.to_string()) + Halt::PrePaymasterPreparationFailed(reason) => { + Self::PrePaymasterPreparationFailed(reason.to_string()) } - TxRevertReason::UnexpectedVMBehavior(reason) => { - SandboxExecutionError::UnexpectedVMBehavior(reason) + Halt::UnexpectedVMBehavior(reason) => Self::UnexpectedVMBehavior(reason), + Halt::BootloaderOutOfGas => { + Self::UnexpectedVMBehavior("bootloader is out of gas".to_string()) } - TxRevertReason::BootloaderOutOfGas => { - SandboxExecutionError::UnexpectedVMBehavior("bootloader is out of gas".to_string()) - } - TxRevertReason::NotEnoughGasProvided => SandboxExecutionError::UnexpectedVMBehavior( + Halt::NotEnoughGasProvided => Self::UnexpectedVMBehavior( "The bootloader did not contain enough gas to execute the transaction".to_string(), ), - revert_reason @ TxRevertReason::FailedToMarkFactoryDependencies(_) => { - SandboxExecutionError::Revert(revert_reason.to_string(), vec![]) - } - TxRevertReason::PayForTxFailed(reason) => { - SandboxExecutionError::FailedToPayForTransaction(reason.to_string()) + revert_reason @ Halt::FailedToMarkFactoryDependencies(_) => { + Self::Revert(revert_reason.to_string(), vec![]) } - TxRevertReason::TooBigGasLimit => { - SandboxExecutionError::Revert(TxRevertReason::TooBigGasLimit.to_string(), vec![]) + Halt::PayForTxFailed(reason) => Self::FailedToPayForTransaction(reason.to_string()), + Halt::TooBigGasLimit => Self::Revert(Halt::TooBigGasLimit.to_string(), vec![]), + Halt::MissingInvocationLimitReached => Self::InnerTxError, + Halt::VMPanic => Self::UnexpectedVMBehavior("VM panic".to_string()), + Halt::FailedToSetL2Block(reason) => SandboxExecutionError::Revert(reason, vec![]), + Halt::FailedToAppendTransactionToL2Block(reason) => { + SandboxExecutionError::Revert(reason, vec![]) } - TxRevertReason::MissingInvocationLimitReached => SandboxExecutionError::InnerTxError, + } + } 
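// Illustrative only (hypothetical values): with this conversion in place, a VM
// halt maps to the API error directly, and the `TxRevertReason` impl below
// funnels its `Halt` variant through the same code path, so both entry points
// agree. For example, both of these yield the same `UnexpectedVMBehavior` error:
//
//     let a: SandboxExecutionError = Halt::BootloaderOutOfGas.into();
//     let b: SandboxExecutionError = TxRevertReason::Halt(Halt::BootloaderOutOfGas).into();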
+} + +impl From<TxRevertReason> for SandboxExecutionError { + fn from(reason: TxRevertReason) -> Self { + match reason { + TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert( + reason.to_user_friendly_string(), + reason.encoded_data(), + ), + TxRevertReason::Halt(halt) => SandboxExecutionError::from(halt), } } } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs index 8f083a0022c3..ec3079a06b3e 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -2,22 +2,18 @@ use tracing::{span, Level}; -use std::{collections::HashMap, mem}; - +use multivm::MultivmTracer; use vm::{ - utils::ETH_CALL_GAS_LIMIT, - vm_with_bootloader::{ - push_transaction_to_bootloader_memory, BootloaderJobType, TxExecutionMode, - }, - VmExecutionResult, + constants::ETH_CALL_GAS_LIMIT, StorageInvocations, TxExecutionMode, VmExecutionResultAndLogs, }; use zksync_dal::ConnectionPool; + use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, StorageKey, - Transaction, H256, U256, + fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, + PackedEthSignature, Transaction, U256, }; -use super::{apply, error::SandboxExecutionError, vm_metrics, BlockArgs, TxSharedArgs, VmPermit}; +use super::{apply, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs, VmPermit}; #[derive(Debug)] pub(crate) struct TxExecutionArgs { @@ -25,6 +21,7 @@ pub(crate) struct TxExecutionArgs { pub enforced_nonce: Option<Nonce>, pub added_balance: U256, pub enforced_base_fee: Option<u64>, + pub missed_storage_invocation_limit: usize, } impl TxExecutionArgs { @@ -34,6 +31,7 @@ impl TxExecutionArgs { enforced_nonce: Some(tx.nonce()), added_balance: U256::zero(), enforced_base_fee: Some(tx.common_data.fee.max_fee_per_gas.as_u64()), + missed_storage_invocation_limit: usize::MAX, } } @@ -43,12 +41,11 @@ impl TxExecutionArgs { ) -> Self { let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX); Self { - execution_mode: TxExecutionMode::EthCall { - missed_storage_invocation_limit, - }, + execution_mode: TxExecutionMode::EthCall, enforced_nonce: None, added_balance: U256::zero(), enforced_base_fee: Some(enforced_base_fee), + missed_storage_invocation_limit, } } @@ -67,9 +64,8 @@ impl TxExecutionArgs { }; Self { - execution_mode: TxExecutionMode::EstimateFee { - missed_storage_invocation_limit, - }, + execution_mode: TxExecutionMode::EstimateFee, + missed_storage_invocation_limit, enforced_nonce: tx.nonce(), added_balance, enforced_base_fee: Some(base_fee), @@ -84,12 +80,16 @@ pub(crate) async fn execute_tx_eth_call( mut tx: L2Tx, block_args: BlockArgs, vm_execution_cache_misses_limit: Option<usize>, - trace_call: bool, -) -> Result<VmExecutionResult, SandboxExecutionError> { + custom_tracers: Vec<ApiTracer>, +) -> VmExecutionResultAndLogs { let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64(); let execution_args = TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit); + if tx.common_data.signature.is_empty() { + tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + } + // Protection against infinite-loop eth_calls and the like: // limiting the amount of gas the call can use. // We can't use BLOCK_ERGS_LIMIT here since the VM itself has some overhead.
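// Illustrative sketch of the placeholder substituted above: a default packed
// ECDSA signature is 65 bytes, laid out as r (32) || s (32) || v, with `v`
// carrying the legacy +27 offset. Assuming that layout, it boils down to the
// hand-rolled version removed from `tx_sender` further below:
fn placeholder_packed_signature_sketch() -> [u8; 65] {
    let mut packed = [0u8; 65]; // r and s stay zeroed
    packed[64] = 27; // recovery id 0, offset by 27 as in legacy Ethereum signatures
    packed
}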
@@ -101,9 +101,7 @@ pub(crate) async fn execute_tx_eth_call( connection_pool, tx.into(), block_args, - BootloaderJobType::TransactionExecution, - trace_call, - &mut HashMap::new(), + custom_tracers, ) .await; @@ -117,11 +115,7 @@ pub(crate) async fn execute_tx_with_pending_state( execution_args: TxExecutionArgs, connection_pool: ConnectionPool, tx: Transaction, - storage_read_cache: &mut HashMap, -) -> ( - Result, - TransactionExecutionMetrics, -) { +) -> (VmExecutionResultAndLogs, TransactionExecutionMetrics) { let mut connection = connection_pool.access_storage_tagged("api").await; let block_args = BlockArgs::pending(&mut connection).await; drop(connection); @@ -136,9 +130,7 @@ pub(crate) async fn execute_tx_with_pending_state( connection_pool, tx, block_args, - BootloaderJobType::TransactionExecution, - false, - storage_read_cache, + vec![], ) .await } @@ -154,23 +146,16 @@ async fn execute_tx_in_sandbox( connection_pool: ConnectionPool, tx: Transaction, block_args: BlockArgs, - job_type: BootloaderJobType, - trace_call: bool, - storage_read_cache: &mut HashMap, -) -> ( - Result, - TransactionExecutionMetrics, -) { + custom_tracers: Vec, +) -> (VmExecutionResultAndLogs, TransactionExecutionMetrics) { let total_factory_deps = tx .execute .factory_deps .as_ref() .map_or(0, |deps| deps.len() as u16); - let moved_cache = mem::take(storage_read_cache); - let (execution_result, moved_cache) = tokio::task::spawn_blocking(move || { + let execution_result = tokio::task::spawn_blocking(move || { let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); - let execution_mode = execution_args.execution_mode; let result = apply::apply_vm_in_sandbox( vm_permit, shared_args, @@ -178,15 +163,16 @@ async fn execute_tx_in_sandbox( &connection_pool, tx, block_args, - moved_cache, |vm, tx| { - push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); - let result = if trace_call { - vm.execute_till_block_end_with_call_tracer(job_type) - } else { - vm.execute_till_block_end(job_type) - }; - result.full_result + vm.push_transaction(&tx); + let storage_invocation_tracer = + StorageInvocations::new(execution_args.missed_storage_invocation_limit); + let custom_tracers: Vec<_> = custom_tracers + .into_iter() + .map(|tracer| tracer.into_boxed()) + .chain(vec![storage_invocation_tracer.into_boxed()]) + .collect(); + vm.inspect_next_transaction(custom_tracers) }, ); span.exit(); @@ -195,13 +181,7 @@ async fn execute_tx_in_sandbox( .await .unwrap(); - *storage_read_cache = moved_cache; - let tx_execution_metrics = vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result); - let result = match execution_result.revert_reason { - None => Ok(execution_result), - Some(revert) => Err(revert.revert_reason.into()), - }; - (result, tx_execution_metrics) + (execution_result, tx_execution_metrics) } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs index 2fe593c57e21..675512b5a167 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -1,28 +1,28 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; use tokio::runtime::Handle; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; - -use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; +use vm::utils::fee::derive_base_fee_and_gas_per_pubdata; use zksync_config::constants::PUBLISH_BYTECODE_OVERHEAD; -use zksync_contracts::BaseSystemContracts; 
use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; -use zksync_types::{api, AccountTreeId, MiniblockNumber, U256}; +use zksync_types::{api, AccountTreeId, L2ChainId, MiniblockNumber, U256}; use zksync_utils::bytecode::{compress_bytecode, hash_bytecode}; +use super::tx_sender::MultiVMBaseSystemContracts; + // Note: keep the modules private, and instead re-export functions that make public interface. mod apply; mod error; mod execute; +mod tracers; mod validate; mod vm_metrics; pub(super) use self::{ error::SandboxExecutionError, execute::{execute_tx_eth_call, execute_tx_with_pending_state, TxExecutionArgs}, + tracers::ApiTracer, }; /// Permit to invoke VM code. @@ -54,7 +54,7 @@ impl VmConcurrencyBarrier { /// Shuts down the related VM concurrency limiter so that it won't issue new permits. pub fn close(&self) { self.limiter.close(); - vlog::info!("VM concurrency limiter closed"); + tracing::info!("VM concurrency limiter closed"); } /// Waits until all permits issued by the VM concurrency limiter are dropped. @@ -68,7 +68,7 @@ impl VmConcurrencyBarrier { loop { let current_permits = self.limiter.available_permits(); - vlog::debug!( + tracing::debug!( "Waiting until all VM permits are dropped; currently remaining: {} / {}", self.max_concurrency - current_permits, self.max_concurrency @@ -100,7 +100,7 @@ pub struct VmConcurrencyLimiter { impl VmConcurrencyLimiter { /// Creates a limiter together with a barrier allowing to control its shutdown. pub fn new(max_concurrency: usize) -> (Self, VmConcurrencyBarrier) { - vlog::info!( + tracing::info!( "Initializing the VM concurrency limiter with max concurrency {max_concurrency}" ); let limiter = Arc::new(tokio::sync::Semaphore::new(max_concurrency)); @@ -130,7 +130,7 @@ impl VmConcurrencyLimiter { let elapsed = start.elapsed(); // We don't want to emit too many logs. if elapsed > Duration::from_millis(10) { - vlog::debug!( + tracing::debug!( "Permit is obtained. Available permits: {available_permits}. Took {elapsed:?}" ); } @@ -225,8 +225,10 @@ pub(crate) struct TxSharedArgs { pub operator_account: AccountTreeId, pub l1_gas_price: u64, pub fair_l2_gas_price: u64, - pub base_system_contracts: BaseSystemContracts, + pub base_system_contracts: MultiVMBaseSystemContracts, pub caches: PostgresStorageCaches, + pub validation_computational_gas_limit: u32, + pub chain_id: L2ChainId, } /// Information about a block provided to VM. @@ -234,7 +236,7 @@ pub(crate) struct TxSharedArgs { pub(crate) struct BlockArgs { block_id: api::BlockId, resolved_block_number: MiniblockNumber, - block_timestamp_s: Option, + l1_batch_timestamp_s: Option, } impl BlockArgs { @@ -243,7 +245,7 @@ impl BlockArgs { Self { block_id, resolved_block_number, - block_timestamp_s: None, + l1_batch_timestamp_s: None, } } @@ -252,6 +254,10 @@ impl BlockArgs { connection: &mut StorageProcessor<'_>, block_id: api::BlockId, ) -> Result, SqlxError> { + if block_id == api::BlockId::Number(api::BlockNumber::Pending) { + return Ok(Some(BlockArgs::pending(connection).await)); + } + let resolved_block_number = connection .blocks_web3_dal() .resolve_block_id(block_id) @@ -260,14 +266,36 @@ impl BlockArgs { return Ok(None); }; - let block_timestamp_s = connection + let l1_batch_number = connection + .storage_web3_dal() + .resolve_l1_batch_number_of_miniblock(resolved_block_number) + .await? 
+ .expected_l1_batch(); + let l1_batch_timestamp_s = connection .blocks_web3_dal() - .get_block_timestamp(resolved_block_number) + .get_expected_l1_batch_timestamp(l1_batch_number) .await?; + assert!( + l1_batch_timestamp_s.is_some(), + "Missing batch timestamp for non-pending block" + ); Ok(Some(Self { block_id, resolved_block_number, - block_timestamp_s, + l1_batch_timestamp_s, })) } + + pub fn resolved_block_number(&self) -> MiniblockNumber { + self.resolved_block_number + } + + pub fn resolves_to_latest_sealed_miniblock(&self) -> bool { + matches!( + self.block_id, + api::BlockId::Number( + api::BlockNumber::Pending | api::BlockNumber::Latest | api::BlockNumber::Committed + ) + ) + } } diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/tracers.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/tracers.rs new file mode 100644 index 000000000000..ad6a65f1373f --- /dev/null +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/tracers.rs @@ -0,0 +1,22 @@ +use multivm::MultivmTracer; +use once_cell::sync::OnceCell; +use std::sync::Arc; +use vm::{CallTracer, HistoryMode}; +use zksync_state::WriteStorage; +use zksync_types::vm_trace::Call; + +/// Custom tracers supported by our api +#[derive(Debug)] +pub(crate) enum ApiTracer { + CallTracer(Arc>>), +} + +impl ApiTracer { + pub fn into_boxed( + self, + ) -> Box> { + match self { + ApiTracer::CallTracer(tracer) => CallTracer::new(tracer, H::default()).into_boxed(), + } + } +} diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs index 0b09d03cdbfe..8e4138b5e702 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -1,11 +1,12 @@ -use std::{ - collections::{HashMap, HashSet}, - time::Instant, -}; +use std::{collections::HashSet, time::Instant}; -use vm::oracles::tracer::{ValidationError, ValidationTracerParams}; -use vm::vm_with_bootloader::push_transaction_to_bootloader_memory; +use multivm::MultivmTracer; +use vm::{ + ExecutionResult, HistoryDisabled, StorageInvocations, ValidationError, ValidationTracer, + ValidationTracerParams, +}; use zksync_dal::{ConnectionPool, StorageProcessor}; + use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, U256}; use super::{ @@ -59,9 +60,9 @@ impl TxSharedArgs { drop(connection); let execution_args = TxExecutionArgs::for_validation(&tx); - let execution_mode = execution_args.execution_mode; let tx: Transaction = tx.into(); - let (validation_result, _) = tokio::task::spawn_blocking(move || { + + let validation_result = tokio::task::spawn_blocking(move || { let span = tracing::debug_span!("validate_in_sandbox").entered(); let result = apply::apply_vm_in_sandbox( vm_permit, @@ -70,12 +71,26 @@ impl TxSharedArgs { &connection_pool, tx, block_args, - HashMap::new(), |vm, tx| { let stage_started_at = Instant::now(); let span = tracing::debug_span!("validation").entered(); - push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); - let result = vm.execute_validation(validation_params); + vm.push_transaction(&tx); + + let (tracer, validation_result) = ValidationTracer::::new( + validation_params, + ); + + let result = vm.inspect_next_transaction(vec![ + tracer.into_boxed(), + StorageInvocations::new(execution_args.missed_storage_invocation_limit).into_boxed() + ]); + + let result = match (result.result, validation_result.get()) { + 
(ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), + (_, Some(err)) => Err(ValidationError::ViolatedRule(err.clone())), + (_, None) => Ok(()), + }; + metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "validation"); span.exit(); diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs index fc7e06cf632f..e0d7d3ea7be9 100644 --- a/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs @@ -1,50 +1,53 @@ -//! Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs -//! or reporting the VM memory usage. - use std::time::Duration; - -use vm::{HistoryMode, VmExecutionResult, VmInstance}; +use vm::{VmExecutionResultAndLogs, VmMemoryMetrics}; use zksync_state::StorageViewMetrics; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - fee::TransactionExecutionMetrics, - storage_writes_deduplicator::StorageWritesDeduplicator, -}; +use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; +use zksync_types::fee::TransactionExecutionMetrics; +use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; use zksync_utils::bytecode::bytecode_len_in_bytes; -pub(super) fn report_storage_view_metrics( +pub fn report_vm_memory_metrics( tx_id: &str, - oracles_sizes: usize, + memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - metrics: StorageViewMetrics, + storage_metrics: StorageViewMetrics, ) { + metrics::histogram!("runtime_context.memory.event_sink_size", memory_metrics.event_sink_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.event_sink_size", memory_metrics.event_sink_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.memory_size", memory_metrics.memory_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.memory_size", memory_metrics.memory_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.decommitter_size", memory_metrics.decommittment_processor_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.decommitter_size", memory_metrics.decommittment_processor_history as f64, "type" => "history"); + metrics::histogram!("runtime_context.memory.storage_size", memory_metrics.storage_inner as f64, "type" => "inner"); + metrics::histogram!("runtime_context.memory.storage_size", memory_metrics.storage_history as f64, "type" => "history"); + metrics::histogram!( "runtime_context.memory.storage_view_cache_size", - metrics.cache_size as f64 + storage_metrics.cache_size as f64 ); metrics::histogram!( "runtime_context.memory", - (oracles_sizes + metrics.cache_size) as f64 + (memory_metrics.full_size() + storage_metrics.cache_size) as f64 ); - let total_storage_invocations = - metrics.get_value_storage_invocations + metrics.set_value_storage_invocations; + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; let total_time_spent_in_storage = - metrics.time_spent_on_get_value + metrics.time_spent_on_set_value; + storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; metrics::histogram!( "runtime_context.storage_interaction.amount", - metrics.storage_invocations_missed as f64, + 
storage_metrics.storage_invocations_missed as f64, "interaction" => "missed" ); metrics::histogram!( "runtime_context.storage_interaction.amount", - metrics.get_value_storage_invocations as f64, + storage_metrics.get_value_storage_invocations as f64, "interaction" => "get_value" ); metrics::histogram!( "runtime_context.storage_interaction.amount", - metrics.set_value_storage_invocations as f64, + storage_metrics.set_value_storage_invocations as f64, "interaction" => "set_value" ); metrics::histogram!( @@ -55,17 +58,17 @@ pub(super) fn report_storage_view_metrics( metrics::histogram!( "runtime_context.storage_interaction.duration", - metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_storage_missed, "interaction" => "missed" ); metrics::histogram!( "runtime_context.storage_interaction.duration", - metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_get_value, "interaction" => "get_value" ); metrics::histogram!( "runtime_context.storage_interaction.duration", - metrics.time_spent_on_set_value, + storage_metrics.time_spent_on_set_value, "interaction" => "set_value" ); metrics::histogram!( @@ -81,10 +84,10 @@ pub(super) fn report_storage_view_metrics( "interaction" => "total" ); } - if metrics.storage_invocations_missed > 0 { - let duration_per_unit = metrics + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics .time_spent_on_storage_missed - .div_f64(metrics.storage_invocations_missed as f64); + .div_f64(storage_metrics.storage_invocations_missed as f64); metrics::histogram!( "runtime_context.storage_interaction.duration_per_unit", duration_per_unit, @@ -100,92 +103,55 @@ pub(super) fn report_storage_view_metrics( const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - vlog::info!( + tracing::info!( "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ {} get_value_storage_invocations, {} set_value_storage_invocations, \ vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ (missed: {:?} get: {:?} set: {:?})", - metrics.storage_invocations_missed, - metrics.get_value_storage_invocations, - metrics.set_value_storage_invocations, - metrics.time_spent_on_storage_missed, - metrics.time_spent_on_get_value, - metrics.time_spent_on_set_value, + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, ); } } pub(super) fn collect_tx_execution_metrics( contracts_deployed: u16, - result: &VmExecutionResult, + result: &VmExecutionResultAndLogs, ) -> TransactionExecutionMetrics { + let writes_metrics = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); let event_topics = result + .logs .events .iter() .map(|event| event.indexed_topics.len() as u16) .sum(); - - let l2_l1_long_messages = extract_long_l2_to_l1_messages(&result.events) + let l2_l1_long_messages = extract_long_l2_to_l1_messages(&result.logs.events) .iter() .map(|event| event.len()) .sum(); - - let published_bytecode_bytes = extract_published_bytecodes(&result.events) + let published_bytecode_bytes = extract_published_bytecodes(&result.logs.events) .iter() .map(|bytecode_hash| bytecode_len_in_bytes(*bytecode_hash)) .sum(); - let writes_metrics = - 
StorageWritesDeduplicator::apply_on_empty_state(&result.storage_log_queries); - TransactionExecutionMetrics { initial_storage_writes: writes_metrics.initial_storage_writes, repeated_storage_writes: writes_metrics.repeated_storage_writes, - gas_used: result.gas_used as usize, + gas_used: result.statistics.gas_used as usize, event_topics, published_bytecode_bytes, l2_l1_long_messages, - l2_l1_logs: result.l2_to_l1_logs.len(), - contracts_used: result.contracts_used, + l2_l1_logs: result.logs.l2_to_l1_logs.len(), + contracts_used: result.statistics.contracts_used, contracts_deployed, - vm_events: result.events.len(), - storage_logs: result.storage_log_queries.len(), - total_log_queries: result.total_log_queries, - cycles_used: result.cycles_used, - computational_gas_used: result.computational_gas_used, + vm_events: result.logs.events.len(), + storage_logs: result.logs.storage_logs.len(), + total_log_queries: result.statistics.total_log_queries, + cycles_used: result.statistics.cycles_used, + computational_gas_used: result.statistics.computational_gas_used, } } - -/// Returns the sum of all oracles' sizes. -pub(super) fn record_vm_memory_metrics(vm: &VmInstance<'_, H>) -> usize { - let event_sink_inner = vm.state.event_sink.get_size(); - let event_sink_history = vm.state.event_sink.get_history_size(); - let memory_inner = vm.state.memory.get_size(); - let memory_history = vm.state.memory.get_history_size(); - let decommittment_processor_inner = vm.state.decommittment_processor.get_size(); - let decommittment_processor_history = vm.state.decommittment_processor.get_history_size(); - let storage_inner = vm.state.storage.get_size(); - let storage_history = vm.state.storage.get_history_size(); - - metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.memory_size", memory_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.memory_size", memory_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.storage_size", storage_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.storage_size", storage_history as f64, "type" => "history"); - - [ - event_sink_inner, - event_sink_history, - memory_inner, - memory_history, - decommittment_processor_inner, - decommittment_processor_history, - storage_inner, - storage_history, - ] - .iter() - .sum::() -} diff --git a/core/bin/zksync_core/src/api_server/healthcheck.rs b/core/bin/zksync_core/src/api_server/healthcheck.rs index af3409f9f4e1..74495f3439cb 100644 --- a/core/bin/zksync_core/src/api_server/healthcheck.rs +++ b/core/bin/zksync_core/src/api_server/healthcheck.rs @@ -26,13 +26,13 @@ async fn run_server( for check in &health_checks { let health_check_name = check.name(); if !health_check_names.insert(health_check_name) { - vlog::warn!( + tracing::warn!( "Health check with name `{health_check_name}` is defined multiple times; only the last mention \ will be present in `/health` endpoint output" ); } } - vlog::debug!( + tracing::debug!( "Starting healthcheck server with checks {health_check_names:?} on {bind_address}" ); 
@@ -45,13 +45,13 @@ async fn run_server( .serve(app.into_make_service()) .with_graceful_shutdown(async move { if stop_receiver.changed().await.is_err() { - vlog::warn!("Stop signal sender for healthcheck server was dropped without sending a signal"); + tracing::warn!("Stop signal sender for healthcheck server was dropped without sending a signal"); } - vlog::info!("Stop signal received, healthcheck server is shutting down"); + tracing::info!("Stop signal received, healthcheck server is shutting down"); }) .await .expect("Healthcheck server failed"); - vlog::info!("Healthcheck server shut down"); + tracing::info!("Healthcheck server shut down"); } #[derive(Debug)] @@ -84,7 +84,7 @@ impl HealthCheckHandle { // Propagate potential panics from the server task. server_result.unwrap(); } else { - vlog::debug!("Timed out {GRACEFUL_SHUTDOWN_WAIT:?} waiting for healthcheck server to gracefully shut down"); + tracing::debug!("Timed out {GRACEFUL_SHUTDOWN_WAIT:?} waiting for healthcheck server to gracefully shut down"); } } } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs index c462f0cf8423..3fa349b304d8 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs @@ -9,15 +9,19 @@ use governor::{ }; // Built-in uses -use std::{cmp, collections::HashMap, num::NonZeroU32, sync::Arc, time::Instant}; +use std::{cmp, num::NonZeroU32, sync::Arc, time::Instant}; // Workspace uses + use vm::{ - transaction_data::{derive_overhead, OverheadCoeficients}, - vm_with_bootloader::derive_base_fee_and_gas_per_pubdata, - zk_evm::zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, - VmExecutionResult, + constants::{BLOCK_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK}, + utils::{ + fee::derive_base_fee_and_gas_per_pubdata, + overhead::{derive_overhead, OverheadCoeficients}, + }, + VmExecutionResultAndLogs, }; + use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; @@ -28,29 +32,69 @@ use zksync_types::{ l2::error::TxCheckError::TxDuplication, l2::L2Tx, utils::storage_key_for_eth_balance, - AccountTreeId, Address, ExecuteTransactionCommon, Nonce, StorageKey, Transaction, H160, H256, - MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, + AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, + ProtocolVersionId, Transaction, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, + MAX_NEW_FACTORY_DEPS, U256, }; + use zksync_utils::h256_to_u256; // Local uses -use crate::api_server::execution_sandbox::{ - adjust_l1_gas_price_for_tx, execute_tx_eth_call, execute_tx_with_pending_state, - get_pubdata_for_factory_deps, BlockArgs, SandboxExecutionError, TxExecutionArgs, TxSharedArgs, - VmConcurrencyLimiter, VmPermit, +use crate::api_server::{ + execution_sandbox::{ + adjust_l1_gas_price_for_tx, execute_tx_eth_call, execute_tx_with_pending_state, + get_pubdata_for_factory_deps, BlockArgs, TxExecutionArgs, TxSharedArgs, + VmConcurrencyLimiter, VmPermit, + }, + tx_sender::result::ApiCallResult, }; + use crate::l1_gas_price::L1GasPriceProvider; use crate::state_keeper::seal_criteria::{ConditionalSealer, SealData}; -mod error; mod proxy; +mod result; -pub(super) use self::{error::SubmitTxError, proxy::TxProxy}; +pub(super) use self::{proxy::TxProxy, result::SubmitTxError}; /// Type alias for the rate 
limiter implementation. type TxSenderRateLimiter = RateLimiter>; +#[derive(Debug, Clone)] +pub struct MultiVMBaseSystemContracts { + /// Contracts to be used for pre-virtual-blocks protocol versions. + pub(crate) pre_virtual_blocks: BaseSystemContracts, + /// Contracts to be used for post-virtual-blocks protocol versions. + pub(crate) post_virtual_blocks: BaseSystemContracts, + /// Contracts to be used for protocol versions after the virtual block upgrade fix. + pub(crate) post_virtual_blocks_finish_upgrade_fix: BaseSystemContracts, +} + +impl MultiVMBaseSystemContracts { + pub fn get_by_protocol_version(self, version: ProtocolVersionId) -> BaseSystemContracts { + match version { + ProtocolVersionId::Version0 + | ProtocolVersionId::Version1 + | ProtocolVersionId::Version2 + | ProtocolVersionId::Version3 + | ProtocolVersionId::Version4 + | ProtocolVersionId::Version5 + | ProtocolVersionId::Version6 + | ProtocolVersionId::Version7 + | ProtocolVersionId::Version8 + | ProtocolVersionId::Version9 + | ProtocolVersionId::Version10 + | ProtocolVersionId::Version11 + | ProtocolVersionId::Version12 => self.pre_virtual_blocks, + ProtocolVersionId::Version13 => self.post_virtual_blocks, + ProtocolVersionId::Version14 + | ProtocolVersionId::Version15 + | ProtocolVersionId::Version16 => self.post_virtual_blocks_finish_upgrade_fix, + } + } +} + /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and /// performing `eth_call` requests. #[derive(Debug, Clone)] @@ -58,11 +102,11 @@ pub struct ApiContracts { /// Contracts to be used when estimating gas. /// These contracts (mainly, bootloader) normally should be tuned to provide accurate /// execution metrics. - estimate_gas: BaseSystemContracts, + pub(crate) estimate_gas: MultiVMBaseSystemContracts, /// Contracts to be used when performing `eth_call` requests. /// These contracts (mainly, bootloader) normally should be tuned to provide a better UX /// experience (e.g. revert messages). - eth_call: BaseSystemContracts, + pub(crate) eth_call: MultiVMBaseSystemContracts, } impl ApiContracts { @@ -71,8 +115,18 @@ impl ApiContracts { /// given that there is no way to fetch "playground" contracts from the main node.
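// Illustrative usage (hypothetical setup): callers pick the contract set that
// matches the block's protocol version at execution time, e.g.
//
//     let contracts = api_contracts
//         .eth_call
//         .clone()
//         .get_by_protocol_version(ProtocolVersionId::Version13);
//
// which, per the match above, resolves to the post-virtual-blocks bootloader.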
pub fn load_from_disk() -> Self { Self { - estimate_gas: BaseSystemContracts::estimate_gas(), - eth_call: BaseSystemContracts::playground(), + estimate_gas: MultiVMBaseSystemContracts { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + }, + eth_call: MultiVMBaseSystemContracts { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + }, } } } @@ -175,12 +229,14 @@ pub struct TxSenderConfig { pub validation_computational_gas_limit: u32, pub default_aa: H256, pub bootloader: H256, + pub chain_id: L2ChainId, } impl TxSenderConfig { pub fn new( state_keeper_config: &StateKeeperConfig, web3_json_config: &Web3JsonRpcConfig, + chain_id: L2ChainId, ) -> Self { Self { fee_account_addr: state_keeper_config.fee_account_addr, @@ -193,6 +249,7 @@ impl TxSenderConfig { .validation_computational_gas_limit, default_aa: state_keeper_config.default_aa_hash, bootloader: state_keeper_config.bootloader_hash, + chain_id, } } } @@ -223,7 +280,7 @@ pub struct TxSender(pub(super) Arc>); // Custom implementation is required due to generic param: // Even though it's under `Arc`, compiler doesn't generate the `Clone` implementation unless // an unnecessary bound is added. -impl Clone for TxSender { +impl Clone for TxSender { fn clone(&self) -> Self { Self(self.0.clone()) } @@ -267,11 +324,10 @@ impl TxSender { TxExecutionArgs::for_validation(&tx), self.0.replica_connection_pool.clone(), tx.clone().into(), - &mut HashMap::new(), ) .await; - vlog::info!( + tracing::info!( "Submit tx {:?} with execution metrics {:?}", tx.hash(), tx_metrics @@ -372,6 +428,11 @@ impl TxSender { fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, base_system_contracts: self.0.api_contracts.eth_call.clone(), caches: self.storage_caches(), + validation_computational_gas_limit: self + .0 + .sender_config + .validation_computational_gas_limit, + chain_id: self.0.sender_config.chain_id, } } @@ -383,8 +444,10 @@ impl TxSender { return Err(SubmitTxError::GasLimitIsTooBig); } + // TODO (SMA-1715): do not subsidize the overhead for the transaction + if tx.common_data.fee.gas_limit > self.0.sender_config.max_allowed_l2_tx_gas_limit.into() { - vlog::info!( + tracing::info!( "Submitted Tx is Unexecutable {:?} because of GasLimitIsTooBig {}", tx.hash(), tx.common_data.fee.gas_limit, @@ -392,7 +455,7 @@ impl TxSender { return Err(SubmitTxError::GasLimitIsTooBig); } if tx.common_data.fee.max_fee_per_gas < self.0.sender_config.fair_l2_gas_price.into() { - vlog::info!( + tracing::info!( "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}", tx.hash(), tx.common_data.fee.max_fee_per_gas @@ -400,7 +463,7 @@ impl TxSender { return Err(SubmitTxError::MaxFeePerGasTooLow); } if tx.common_data.fee.max_fee_per_gas < tx.common_data.fee.max_priority_fee_per_gas { - vlog::info!( + tracing::info!( "Submitted Tx is Unexecutable {:?} because of MaxPriorityFeeGreaterThanMaxFee {}", tx.hash(), tx.common_data.fee.max_fee_per_gas @@ -541,8 +604,7 @@ impl TxSender { tx_gas_limit: u32, l1_gas_price: u64, base_fee: u64, - storage_read_cache: &mut HashMap, - ) -> Result { + ) -> (VmExecutionResultAndLogs, 
TransactionExecutionMetrics) { let gas_limit_with_overhead = tx_gas_limit + derive_overhead( tx_gas_limit, @@ -581,26 +643,23 @@ impl TxSender { execution_args, self.0.replica_connection_pool.clone(), tx.clone(), - storage_read_cache, ) .await; - if let Err(err) = self.ensure_tx_executable(tx, &tx_metrics, false) { - let SubmitTxError::Unexecutable(message) = err else { - unreachable!() - }; - return Err(SandboxExecutionError::Unexecutable(message)); - } - exec_result + (exec_result, tx_metrics) } fn shared_args_for_gas_estimate(&self, l1_gas_price: u64) -> TxSharedArgs { + let config = &self.0.sender_config; TxSharedArgs { - operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), + operator_account: AccountTreeId::new(config.fee_account_addr), l1_gas_price, - fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, + fair_l2_gas_price: config.fair_l2_gas_price, + // We want to bypass the computation gas limit check for gas estimation + validation_computational_gas_limit: BLOCK_GAS_LIMIT, base_system_contracts: self.0.api_contracts.estimate_gas.clone(), caches: self.storage_caches(), + chain_id: config.chain_id, } } @@ -660,7 +719,7 @@ impl TxSender { && account_code_hash == H256::zero() && tx.execute.value > self.get_balance(&tx.initiator_account()).await { - vlog::info!( + tracing::info!( "fee estimation failed on validation step. account: {} does not have enough funds for transferring tx.value: {}.", &tx.initiator_account(), @@ -672,8 +731,7 @@ impl TxSender { // For L2 transactions we need a properly formatted signature if let ExecuteTransactionCommon::L2(l2_common_data) = &mut tx.common_data { if l2_common_data.signature.is_empty() { - l2_common_data.signature = vec![0u8; 65]; - l2_common_data.signature[64] = 27; + l2_common_data.signature = PackedEthSignature::default().serialize_packed().into(); } l2_common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); @@ -705,9 +763,6 @@ impl TxSender { pubdata_for_factory_deps * (gas_per_pubdata_byte as u32) }; - // Rolling cache with storage values that were read from the DB. - let mut storage_read_cache = HashMap::new(); - // We are using binary search to find the minimal value of gas_limit under which // the transaction succeeds let mut lower_bound = 0; @@ -717,7 +772,7 @@ impl TxSender { tx.initiator_account(), tx.nonce().unwrap_or(Nonce(0)) ); - vlog::trace!( + tracing::trace!( "fee estimation tx {:?}: preparation took {:?}, starting binary search", tx_id, estimation_started_at.elapsed(), @@ -731,7 +786,7 @@ impl TxSender { // gas limit will make the transaction successful let iteration_started_at = Instant::now(); let try_gas_limit = gas_for_bytecodes_pubdata + mid; - let result = self + let (result, _execution_metrics) = self .estimate_gas_step( vm_permit.clone(), tx.clone(), @@ -739,16 +794,16 @@ impl TxSender { try_gas_limit, l1_gas_price, base_fee, - &mut storage_read_cache, ) .await; - if result.is_err() { + + if result.result.is_failed() { lower_bound = mid + 1; } else { upper_bound = mid; } - vlog::trace!( + tracing::trace!( "fee estimation tx {:?}: iteration {} took {:?}.
lower_bound: {}, upper_bound: {}", tx_id, number_of_iterations, @@ -769,7 +824,7 @@ impl TxSender { ); let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata; - let result = self + let (result, tx_metrics) = self .estimate_gas_step( vm_permit, tx.clone(), @@ -777,39 +832,36 @@ impl TxSender { suggested_gas_limit, l1_gas_price, base_fee, - &mut storage_read_cache, ) .await; - match result { - Err(err) => Err(err.into()), - Ok(_) => { - let overhead = derive_overhead( - suggested_gas_limit, - gas_per_pubdata_byte as u32, - tx.encoding_len(), - OverheadCoeficients::from_tx_type(tx.tx_format() as u8), - ); + result.into_api_call_result()?; + self.ensure_tx_executable(tx.clone(), &tx_metrics, false)?; - let full_gas_limit = - match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) { - (value, false) => value, - (_, true) => { - return Err(SubmitTxError::ExecutionReverted( - "exceeds block gas limit".to_string(), - vec![], - )); - } - }; - - Ok(Fee { - max_fee_per_gas: base_fee.into(), - max_priority_fee_per_gas: 0u32.into(), - gas_limit: full_gas_limit.into(), - gas_per_pubdata_limit: gas_per_pubdata_byte.into(), - }) - } - } + let overhead = derive_overhead( + suggested_gas_limit, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + ); + + let full_gas_limit = + match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) { + (value, false) => value, + (_, true) => { + return Err(SubmitTxError::ExecutionReverted( + "exceeds block gas limit".to_string(), + vec![], + )); + } + }; + + Ok(Fee { + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0u32.into(), + gas_limit: full_gas_limit.into(), + gas_per_pubdata_limit: gas_per_pubdata_byte.into(), + }) } pub(super) async fn eth_call( @@ -821,25 +873,17 @@ impl TxSender { let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; - let result = execute_tx_eth_call( + execute_tx_eth_call( vm_permit, self.shared_args(), self.0.replica_connection_pool.clone(), tx, block_args, vm_execution_cache_misses_limit, - false, + vec![], ) - .await?; - - Ok(match result.revert_reason { - Some(result) => result.original_data, - None => result - .return_data - .into_iter() - .flat_map(<[u8; 32]>::from) - .collect(), - }) + .await + .into_api_call_result() } pub fn gas_price(&self) -> u64 { @@ -879,7 +923,7 @@ impl TxSender { "Tx is Unexecutable because of {reason}; inputs for decision: {seal_data:?}" ); if log_message { - vlog::info!("{tx_hash:#?} {message}"); + tracing::info!("{tx_hash:#?} {message}"); } return Err(SubmitTxError::Unexecutable(message)); } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs index 1f1a4b63993d..4f70b1d5e503 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs @@ -44,7 +44,7 @@ impl TxProxy { pub async fn submit_tx(&self, tx: &L2Tx) -> RpcResult { let input_data = tx.common_data.input_data().expect("raw tx is absent"); let raw_tx = zksync_types::Bytes(input_data.to_vec()); - vlog::info!("Proxying tx {}", tx.hash()); + tracing::info!("Proxying tx {}", tx.hash()); self.client.send_raw_transaction(raw_tx).await } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/error.rs b/core/bin/zksync_core/src/api_server/tx_sender/result.rs similarity index 89% rename 
from core/bin/zksync_core/src/api_server/tx_sender/error.rs
rename to core/bin/zksync_core/src/api_server/tx_sender/result.rs
index 847a8206807c..ea57bcb975e7 100644
--- a/core/bin/zksync_core/src/api_server/tx_sender/error.rs
+++ b/core/bin/zksync_core/src/api_server/tx_sender/result.rs
@@ -1,6 +1,7 @@
 use crate::api_server::execution_sandbox::SandboxExecutionError;
 use thiserror::Error;
-use vm::oracles::tracer::ValidationError;
+
+use vm::{ExecutionResult, ValidationError, VmExecutionResultAndLogs};
 use zksync_types::l2::error::TxCheckError;
 use zksync_types::U256;
@@ -133,7 +134,6 @@ impl From<SandboxExecutionError> for SubmitTxError {
             SandboxExecutionError::FailedToPayForTransaction(reason) => {
                 Self::FailedToChargeFee(reason)
             }
-            SandboxExecutionError::Unexecutable(reason) => Self::Unexecutable(reason),
         }
     }
 }
@@ -143,3 +143,23 @@ impl From<ValidationError> for SubmitTxError {
         Self::ValidationFailed(err.to_string())
     }
 }
+
+pub(crate) trait ApiCallResult {
+    fn into_api_call_result(self) -> Result<Vec<u8>, SubmitTxError>;
+}
+
+impl ApiCallResult for VmExecutionResultAndLogs {
+    fn into_api_call_result(self) -> Result<Vec<u8>, SubmitTxError> {
+        match self.result {
+            ExecutionResult::Success { output } => Ok(output),
+            ExecutionResult::Revert { output } => Err(SubmitTxError::ExecutionReverted(
+                output.to_user_friendly_string(),
+                output.encoded_data(),
+            )),
+            ExecutionResult::Halt { reason } => {
+                let output: SandboxExecutionError = reason.into();
+                Err(output.into())
+            }
+        }
+    }
+}
diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs
new file mode 100644
index 000000000000..492eb32a33ba
--- /dev/null
+++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs
@@ -0,0 +1,135 @@
+use std::num::NonZeroU32;
+use std::sync::Arc;
+
+use governor::clock::DefaultClock;
+use governor::middleware::NoOpMiddleware;
+use governor::state::{InMemoryState, NotKeyed};
+use governor::{Quota, RateLimiter};
+
+use jsonrpc_core::middleware::Middleware;
+use jsonrpc_core::*;
+use jsonrpc_pubsub::Session;
+
+/// Configures the rate limiting for the WebSocket API.
+/// Rate limiting is applied per active connection, e.g. a single connected user may not send more than X requests
+/// per minute.
+#[derive(Debug, Clone)]
+pub struct RateLimitMetadata<T> {
+    meta: T,
+    rate_limiter: Option<Arc<RateLimiter<NotKeyed, InMemoryState, DefaultClock, NoOpMiddleware>>>,
+}
+
+impl<T> RateLimitMetadata<T> {
+    pub(crate) fn new(requests_per_minute: Option<u32>, meta: T) -> Self {
+        let rate_limiter = if let Some(requests_per_minute) = requests_per_minute {
+            assert!(requests_per_minute > 0, "requests_per_minute must be > 0");
+
+            Some(Arc::new(RateLimiter::direct(Quota::per_minute(
+                NonZeroU32::new(requests_per_minute).unwrap(),
+            ))))
+        } else {
+            None
+        };
+
+        Self { meta, rate_limiter }
+    }
+}
+
+impl<T: jsonrpc_core::Metadata> jsonrpc_core::Metadata for RateLimitMetadata<T> {}
+
+impl<T: jsonrpc_pubsub::PubSubMetadata> jsonrpc_pubsub::PubSubMetadata for RateLimitMetadata<T> {
+    fn session(&self) -> Option<Arc<Session>> {
+        self.meta.session()
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum Transport {
+    Ws,
+}
+
+impl Transport {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Transport::Ws => "ws",
+        }
+    }
+}
+
+/// Middleware that implements limiting for WebSocket connections:
+/// - Limits the number of requests per minute for a single connection.
+/// - Limits the maximum size of the batch requests.
+///
+/// Rate limiting data is stored in the metadata of the connection, while the maximum batch size is stored in the
+/// middleware itself.
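// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): a minimal, self-contained sketch of
// the `governor` primitives that the `LimitMiddleware` defined below relies on.
// It assumes a governor 0.4-style API, where `check_n` returns a plain `Result`
// and a failed check consumes no quota; exact return types differ between
// governor versions, so this only mirrors the `.is_err()` usage from the diff.
use std::num::NonZeroU32;

use governor::{Quota, RateLimiter};

fn main() {
    // Allow at most 5 requests per minute for a single (not-keyed) connection.
    let limiter = RateLimiter::direct(Quota::per_minute(NonZeroU32::new(5).unwrap()));

    // A batch of 3 RPC requests consumes 3 cells at once, as `check_n` does
    // in `on_request` below.
    let batch = NonZeroU32::new(3).unwrap();
    assert!(limiter.check_n(batch).is_ok());

    // Only 2 cells remain, so another batch of 3 is rejected...
    assert!(limiter.check_n(batch).is_err());
    // ...while single requests still fit until the quota is exhausted.
    assert!(limiter.check().is_ok());
    assert!(limiter.check().is_ok());
    assert!(limiter.check().is_err());
}
// ----------------------------------------------------------------------------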
+#[derive(Debug)]
+pub(crate) struct LimitMiddleware {
+    transport: Transport,
+    max_batch_size: Option<usize>,
+}
+
+impl LimitMiddleware {
+    pub fn new(transport: Transport, max_batch_size: Option<usize>) -> Self {
+        Self {
+            transport,
+            max_batch_size,
+        }
+    }
+}
+
+impl<T: jsonrpc_core::Metadata> Middleware<RateLimitMetadata<T>> for LimitMiddleware {
+    type Future = FutureResponse;
+
+    type CallFuture = middleware::NoopCallFuture;
+
+    fn on_request<F, X>(
+        &self,
+        request: jsonrpc_core::Request,
+        meta: RateLimitMetadata<T>,
+        next: F,
+    ) -> futures::future::Either<Self::Future, X>
+    where
+        F: Fn(jsonrpc_core::Request, RateLimitMetadata<T>) -> X + Send + Sync,
+        X: futures::Future<Output = Option<jsonrpc_core::Response>> + Send + 'static,
+    {
+        // Check whether rate limiting is enabled, and if so, whether we should discard the request.
+        // Note that every request in an RPC batch counts towards the limit separately.
+        if let Some(rate_limiter) = &meta.rate_limiter {
+            // Check number of actual RPC requests.
+            let num_requests: usize = match &request {
+                jsonrpc_core::Request::Single(_) => 1,
+                jsonrpc_core::Request::Batch(batch) => batch.len(),
+            };
+            let num_requests = NonZeroU32::new(num_requests.max(1) as u32).unwrap();
+
+            // Note: if required, we can extract data on rate limiting from the error.
+            if rate_limiter.check_n(num_requests).is_err() {
+                metrics::increment_counter!("api.jsonrpc_backend.batch.rate_limited", "transport" => self.transport.as_str());
+                let err = jsonrpc_core::error::Error {
+                    code: jsonrpc_core::error::ErrorCode::ServerError(429),
+                    message: "Too many requests".to_string(),
+                    data: None,
+                };
+
+                return futures::future::Either::Left(Box::pin(futures::future::ready(Some(
+                    jsonrpc_core::Response::from(err, Some(Version::V2)),
+                ))));
+            }
+        }
+
+        // Check whether the batch size is within the allowed limits.
+        if let jsonrpc_core::Request::Batch(batch) = &request {
+            metrics::histogram!("api.jsonrpc_backend.batch.size", batch.len() as f64, "transport" => self.transport.as_str());
+
+            // Reject the batch only if a limit is configured and exceeded; comparing
+            // `Some(batch.len()) > self.max_batch_size` directly would reject every
+            // batch when no limit is set, since `Some(_) > None` holds for `Option`.
+            if self.max_batch_size.map_or(false, |limit| batch.len() > limit) {
+                metrics::increment_counter!("api.jsonrpc_backend.batch.rejected", "transport" => self.transport.as_str());
+                return futures::future::Either::Left(Box::pin(futures::future::ready(Some(
+                    jsonrpc_core::Response::from(Error::invalid_request(), Some(Version::V2)),
+                ))));
+            }
+        }
+
+        // Proceed with the request.
+ futures::future::Either::Right(next(request, meta)) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs index 2f26728d07c7..1be080d114bc 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs @@ -13,6 +13,7 @@ pub fn into_jsrpc_error(err: Web3Error) -> Error { | Web3Error::FilterNotFound | Web3Error::InvalidFeeParams(_) | Web3Error::LogsLimitExceeded(_, _, _) + | Web3Error::TooManyLogs(_) | Web3Error::InvalidFilterBlockHash => ErrorCode::InvalidParams, Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3.into(), Web3Error::PubSubTimeout => 4.into(), @@ -32,7 +33,7 @@ pub fn into_jsrpc_error(err: Web3Error) -> Error { } pub fn internal_error(method_name: &str, error: impl ToString) -> Web3Error { - vlog::error!( + tracing::error!( "Internal error in method {}: {}", method_name, error.to_string(), diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/mod.rs index ea957a5df6fb..d1d83a37d401 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod batch_limiter_middleware; pub mod error; pub mod namespaces; pub mod pub_sub; diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index 765a85bea2d7..5cfb76af62b9 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -16,7 +16,7 @@ use zksync_types::{ transaction_request::CallRequest, Address, L1BatchNumber, MiniblockNumber, H256, U256, U64, }; -use zksync_web3_decl::types::Token; +use zksync_web3_decl::types::{Filter, Log, Token}; // Local uses use crate::web3::namespaces::ZksNamespace; @@ -108,6 +108,9 @@ pub trait ZksNamespaceT { &self, version_id: Option, ) -> BoxFuture>>; + + #[rpc(name = "zks_getLogsWithVirtualBlocks")] + fn get_logs_with_virtual_blocks(&self, filter: Filter) -> BoxFuture>>; } impl ZksNamespaceT for ZksNamespace { @@ -295,4 +298,14 @@ impl ZksNamespaceT for ZksNamespa let self_ = self.clone(); Box::pin(async move { Ok(self_.get_protocol_version_impl(version_id).await) }) } + + fn get_logs_with_virtual_blocks(&self, filter: Filter) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_logs_with_virtual_blocks_impl(filter) + .await + .map_err(into_jsrpc_error) + }) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs index 2306544cd5dc..4a28a17b4e36 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs @@ -8,6 +8,7 @@ use jsonrpc_pubsub::{Session, SubscriptionId}; use zksync_web3_decl::types::PubSubResult; use super::super::namespaces::EthSubscribe; +use super::batch_limiter_middleware::RateLimitMetadata; #[rpc] pub trait Web3PubSub { @@ -35,7 +36,7 @@ pub trait Web3PubSub { } impl Web3PubSub for EthSubscribe { - type Metadata = Arc; + type Metadata = RateLimitMetadata>; fn subscribe( &self, diff --git 
a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs index 3ead6304d7e0..815f92b70f26 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs @@ -24,7 +24,8 @@ pub fn into_jsrpc_error(err: Web3Error) -> ErrorObjectOwned { | Web3Error::FilterNotFound | Web3Error::InvalidFeeParams(_) | Web3Error::InvalidFilterBlockHash - | Web3Error::LogsLimitExceeded(_, _, _) => ErrorCode::InvalidParams.code(), + | Web3Error::LogsLimitExceeded(_, _, _) + | Web3Error::TooManyLogs(_) => ErrorCode::InvalidParams.code(), Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3, Web3Error::PubSubTimeout => 4, Web3Error::RequestTimeout => 5, diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index 61b95d723ea3..ee675cc9e2ca 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -14,7 +14,7 @@ use zksync_types::{ use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, namespaces::zks::ZksNamespaceServer, - types::Token, + types::{Filter, Log, Token}, }; use crate::{ @@ -152,4 +152,10 @@ impl ZksNamespaceServer for ZksNa ) -> RpcResult> { Ok(self.get_protocol_version_impl(version_id).await) } + + async fn get_logs_with_virtual_blocks(&self, filter: Filter) -> RpcResult> { + self.get_logs_with_virtual_blocks_impl(filter) + .await + .map_err(into_jsrpc_error) + } } diff --git a/core/bin/zksync_core/src/api_server/web3/mod.rs b/core/bin/zksync_core/src/api_server/web3/mod.rs index 8cf1c69c8041..1267e2b67ccd 100644 --- a/core/bin/zksync_core/src/api_server/web3/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/mod.rs @@ -1,7 +1,9 @@ // External uses +use anyhow::Context as _; use futures::future; -use jsonrpc_core::{IoHandler, MetaIoHandler}; +use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::hyper; + use jsonrpc_pubsub::PubSubHandler; use serde::Deserialize; use tokio::sync::{watch, RwLock}; @@ -11,7 +13,6 @@ use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; use std::{net::SocketAddr, sync::Arc, time::Duration}; // Workspace uses -use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{api, MiniblockNumber}; @@ -29,7 +30,10 @@ use zksync_web3_decl::{ // Local uses use crate::{ - api_server::{execution_sandbox::VmConcurrencyBarrier, tx_sender::TxSender}, + api_server::{ + execution_sandbox::VmConcurrencyBarrier, tx_sender::TxSender, + web3::backend_jsonrpc::batch_limiter_middleware::RateLimitMetadata, + }, l1_gas_price::L1GasPriceProvider, sync_layer::SyncState, }; @@ -42,6 +46,7 @@ pub mod state; // Uses from submodules. 
use self::backend_jsonrpc::{ + batch_limiter_middleware::{LimitMiddleware, Transport}, error::internal_error, namespaces::{ debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, @@ -54,7 +59,7 @@ use self::namespaces::{ ZksNamespace, }; use self::pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; -use self::state::{Filters, InternalApiConfig, RpcState}; +use self::state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber}; /// Timeout for graceful shutdown logic within API servers. const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5); @@ -108,6 +113,7 @@ impl Namespace { pub struct ApiBuilder { backend: ApiBackend, pool: ConnectionPool, + last_miniblock_pool: ConnectionPool, config: InternalApiConfig, transport: Option, tx_sender: Option>, @@ -116,11 +122,13 @@ pub struct ApiBuilder { subscriptions_limit: Option, batch_request_size_limit: Option, response_body_size_limit: Option, + websocket_requests_per_minute_limit: Option, sync_state: Option, threads: Option, vm_concurrency_limit: Option, polling_interval: Option, namespaces: Option>, + logs_translator_enabled: bool, } impl ApiBuilder { @@ -128,6 +136,7 @@ impl ApiBuilder { Self { backend: ApiBackend::Jsonrpsee, transport: None, + last_miniblock_pool: pool.clone(), pool, sync_state: None, tx_sender: None, @@ -136,31 +145,20 @@ impl ApiBuilder { subscriptions_limit: None, batch_request_size_limit: None, response_body_size_limit: None, + websocket_requests_per_minute_limit: None, threads: None, vm_concurrency_limit: None, polling_interval: None, namespaces: None, config, + logs_translator_enabled: false, } } pub fn jsonrpc_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { backend: ApiBackend::Jsonrpc, - transport: None, - pool, - sync_state: None, - tx_sender: None, - vm_barrier: None, - filters_limit: None, - subscriptions_limit: None, - batch_request_size_limit: None, - response_body_size_limit: None, - threads: None, - vm_concurrency_limit: None, - polling_interval: None, - namespaces: None, - config, + ..Self::jsonrpsee_backend(config, pool) } } @@ -174,6 +172,14 @@ impl ApiBuilder { self } + /// Configures a dedicated DB pool to be used for updating the latest miniblock information + /// in a background task. If not called, the main pool will be used. If the API server is under high load, + /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. + pub fn with_last_miniblock_pool(mut self, pool: ConnectionPool) -> Self { + self.last_miniblock_pool = pool; + self + } + pub fn with_tx_sender( mut self, tx_sender: TxSender, @@ -204,6 +210,14 @@ impl ApiBuilder { self } + pub fn with_websocket_requests_per_minute_limit( + mut self, + websocket_requests_per_minute_limit: u32, + ) -> Self { + self.websocket_requests_per_minute_limit = Some(websocket_requests_per_minute_limit); + self + } + pub fn with_sync_state(mut self, sync_state: SyncState) -> Self { self.sync_state = Some(sync_state); self @@ -228,30 +242,49 @@ impl ApiBuilder { self.namespaces = Some(namespaces); self } + + pub fn enable_request_translator(mut self) -> Self { + tracing::info!("Logs request translator enabled"); + self.logs_translator_enabled = true; + self + } } impl ApiBuilder { - fn build_rpc_state(&self) -> RpcState { + fn build_rpc_state(self) -> RpcState { + // Chosen to be significantly smaller than the interval between miniblocks, but larger than + // the latency of getting the latest sealed miniblock number from Postgres. 
If the API server
+        // processes enough requests, information about the latest sealed miniblock is also updated
+        // as a side effect of reporting block difference metrics, so the actual update lag should be
+        // much smaller than this value.
+        const SEALED_MINIBLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25);
+
+        let (last_sealed_miniblock, update_task) =
+            SealedMiniblockNumber::new(self.last_miniblock_pool, SEALED_MINIBLOCK_UPDATE_INTERVAL);
+        // The update task takes care of its own termination, so we don't need to retain its handle.
+        tokio::spawn(update_task);
+
         RpcState {
             installed_filters: Arc::new(RwLock::new(Filters::new(
                 self.filters_limit.unwrap_or(usize::MAX),
             ))),
-            connection_pool: self.pool.clone(),
-            tx_sender: self.tx_sender.clone().expect("TxSender is not provided"),
-            sync_state: self.sync_state.clone(),
-            api_config: self.config.clone(),
+            connection_pool: self.pool,
+            tx_sender: self.tx_sender.expect("TxSender is not provided"),
+            sync_state: self.sync_state,
+            api_config: self.config,
+            last_sealed_miniblock,
+            logs_translator_enabled: self.logs_translator_enabled,
         }
     }

-    async fn build_rpc_module(&self) -> RpcModule<()> {
+    async fn build_rpc_module(mut self) -> RpcModule<()> {
+        let namespaces = self.namespaces.take().unwrap();
         let zksync_network_id = self.config.l2_chain_id;
-        let rpc_app = self.build_rpc_state();
+        let rpc_state = self.build_rpc_state();

         // Collect all the methods into a single RPC module.
-        let namespaces = self.namespaces.as_ref().unwrap();
         let mut rpc = RpcModule::new(());
         if namespaces.contains(&Namespace::Eth) {
-            rpc.merge(EthNamespace::new(rpc_app.clone()).into_rpc())
+            rpc.merge(EthNamespace::new(rpc_state.clone()).into_rpc())
                 .expect("Can't merge eth namespace");
         }
         if namespaces.contains(&Namespace::Net) {
@@ -263,35 +296,16 @@ impl ApiBuilder {
                 .expect("Can't merge web3 namespace");
         }
         if namespaces.contains(&Namespace::Zks) {
-            rpc.merge(ZksNamespace::new(rpc_app.clone()).into_rpc())
+            rpc.merge(ZksNamespace::new(rpc_state.clone()).into_rpc())
                 .expect("Can't merge zks namespace");
         }
         if namespaces.contains(&Namespace::En) {
-            rpc.merge(EnNamespace::new(rpc_app.clone()).into_rpc())
+            rpc.merge(EnNamespace::new(rpc_state.clone()).into_rpc())
                 .expect("Can't merge en namespace");
         }
         if namespaces.contains(&Namespace::Debug) {
-            let hashes = BaseSystemContractsHashes {
-                default_aa: rpc_app.tx_sender.0.sender_config.default_aa,
-                bootloader: rpc_app.tx_sender.0.sender_config.bootloader,
-            };
-            rpc.merge(
-                DebugNamespace::new(
-                    rpc_app.connection_pool,
-                    hashes,
-                    rpc_app.tx_sender.0.sender_config.fair_l2_gas_price,
-                    rpc_app
-                        .tx_sender
-                        .0
-                        .sender_config
-                        .vm_execution_cache_misses_limit,
-                    rpc_app.tx_sender.vm_concurrency_limiter(),
-                    rpc_app.tx_sender.storage_caches(),
-                )
-                .await
-                .into_rpc(),
-            )
-            .expect("Can't merge debug namespace");
+            rpc.merge(DebugNamespace::new(rpc_state).await.into_rpc())
+                .expect("Can't merge debug namespace");
         }
         rpc
     }

@@ -299,13 +313,16 @@ impl ApiBuilder {
     pub async fn build(
         mut self,
         stop_receiver: watch::Receiver<bool>,
-    ) -> (Vec<tokio::task::JoinHandle<()>>, ReactiveHealthCheck) {
+    ) -> (
+        Vec<tokio::task::JoinHandle<anyhow::Result<()>>>,
+        ReactiveHealthCheck,
+    ) {
         if self.filters_limit.is_none() {
-            vlog::warn!("Filters limit is not set - unlimited filters are allowed");
+            tracing::warn!("Filters limit is not set - unlimited filters are allowed");
         }

         if self.namespaces.is_none() {
-            vlog::warn!("debug_ API namespace will be disabled by default in ApiBuilder");
+            tracing::warn!("debug_ API namespace will be disabled by default in ApiBuilder");
             self.namespaces =
Some(Namespace::NON_DEBUG.to_vec()); } @@ -316,23 +333,25 @@ impl ApiBuilder { .contains(&Namespace::Pubsub) && matches!(&self.transport, Some(ApiTransport::Http(_))) { - vlog::debug!("pubsub API is not supported for HTTP transport, ignoring"); + tracing::debug!("pubsub API is not supported for HTTP transport, ignoring"); } match (&self.transport, self.subscriptions_limit) { (Some(ApiTransport::WebSocket(_)), None) => { - vlog::warn!( + tracing::warn!( "`subscriptions_limit` is not set - unlimited subscriptions are allowed" ); } (Some(ApiTransport::Http(_)), Some(_)) => { - vlog::warn!( + tracing::warn!( "`subscriptions_limit` is ignored for HTTP transport, use WebSocket instead" ); } _ => {} } + // TODO (PLA-284): Pass `stop_receiver` into every implementation to properly + // handle shutdown signals. match (self.backend, self.transport.take()) { (ApiBackend::Jsonrpc, Some(ApiTransport::Http(addr))) => { let (api_health_check, health_updater) = ReactiveHealthCheck::new("http_api"); @@ -352,21 +371,16 @@ impl ApiBuilder { api_health_check, ) } - (ApiBackend::Jsonrpsee, Some(ApiTransport::Http(addr))) => { - let (api_health_check, health_updater) = ReactiveHealthCheck::new("http_api"); - ( - vec![ - self.build_jsonrpsee_http(addr, stop_receiver, health_updater) - .await, - ], - api_health_check, - ) - } - (ApiBackend::Jsonrpsee, Some(ApiTransport::WebSocket(addr))) => { - let (api_health_check, health_updater) = ReactiveHealthCheck::new("ws_api"); + (ApiBackend::Jsonrpsee, Some(transport)) => { + let name = match &transport { + ApiTransport::Http(_) => "http_api", + ApiTransport::WebSocket(_) => "ws_api", + }; + let (api_health_check, health_updater) = ReactiveHealthCheck::new(name); + ( vec![ - self.build_jsonrpsee_ws(addr, stop_receiver, health_updater) + self.build_jsonrpsee(transport, stop_receiver, health_updater) .await, ], api_health_check, @@ -377,41 +391,40 @@ impl ApiBuilder { } async fn build_jsonrpc_http( - self, + mut self, addr: SocketAddr, mut stop_receiver: watch::Receiver, health_updater: HealthUpdater, - ) -> tokio::task::JoinHandle<()> { + ) -> tokio::task::JoinHandle> { if self.batch_request_size_limit.is_some() { - vlog::info!("`batch_request_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + tracing::info!("`batch_request_size_limit` is not supported for HTTP `jsonrpc` backend, this value is ignored"); } if self.response_body_size_limit.is_some() { - vlog::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + tracing::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); } - let mut io_handler = IoHandler::new(); - self.extend_jsonrpc_methods(&mut io_handler).await; - let vm_barrier = self.vm_barrier.unwrap(); - + let vm_barrier = self.vm_barrier.take().unwrap(); let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .thread_name("jsonrpc-http-worker") .worker_threads(self.threads.unwrap()) .build() .unwrap(); + let mut io_handler: MetaIoHandler<()> = MetaIoHandler::default(); + self.extend_jsonrpc_methods(&mut io_handler).await; tokio::task::spawn_blocking(move || { let server = jsonrpc_http_server::ServerBuilder::new(io_handler) .threads(1) .event_loop_executor(runtime.handle().clone()) .start_http(&addr) - .unwrap(); + .context("jsonrpc_http::Server::start_http")?; let close_handle = server.close_handle(); let closing_vm_barrier = vm_barrier.clone(); runtime.handle().spawn(async move { if stop_receiver.changed().await.is_ok() { - 
vlog::info!("Stop signal received, HTTP JSON-RPC server is shutting down"); + tracing::info!("Stop signal received, HTTP JSON-RPC server is shutting down"); closing_vm_barrier.close(); close_handle.close(); } @@ -420,9 +433,10 @@ impl ApiBuilder { health_updater.update(HealthStatus::Ready.into()); server.wait(); drop(health_updater); - vlog::info!("HTTP JSON-RPC server stopped"); + tracing::info!("HTTP JSON-RPC server stopped"); runtime.block_on(Self::wait_for_vm(vm_barrier, "HTTP")); runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); + Ok(()) }) } @@ -430,22 +444,23 @@ impl ApiBuilder { let wait_for_vm = tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, vm_barrier.wait_until_stopped()); if wait_for_vm.await.is_err() { - vlog::warn!( + tracing::warn!( "VM execution on {transport} JSON-RPC server didn't stop after {GRACEFUL_SHUTDOWN_TIMEOUT:?}; \ forcing shutdown anyway" ); } else { - vlog::info!("VM execution on {transport} JSON-RPC server stopped"); + tracing::info!("VM execution on {transport} JSON-RPC server stopped"); } } - async fn extend_jsonrpc_methods(&self, io: &mut MetaIoHandler) + async fn extend_jsonrpc_methods(mut self, io: &mut MetaIoHandler) where T: jsonrpc_core::Metadata, + S: jsonrpc_core::Middleware, { let zksync_network_id = self.config.l2_chain_id; + let namespaces = self.namespaces.take().unwrap(); let rpc_state = self.build_rpc_state(); - let namespaces = self.namespaces.as_ref().unwrap(); if namespaces.contains(&Namespace::Eth) { io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); } @@ -462,48 +477,37 @@ impl ApiBuilder { io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); } if namespaces.contains(&Namespace::Debug) { - let hashes = BaseSystemContractsHashes { - default_aa: rpc_state.tx_sender.0.sender_config.default_aa, - bootloader: rpc_state.tx_sender.0.sender_config.bootloader, - }; - let debug_ns = DebugNamespace::new( - rpc_state.connection_pool, - hashes, - rpc_state.tx_sender.0.sender_config.fair_l2_gas_price, - rpc_state - .tx_sender - .0 - .sender_config - .vm_execution_cache_misses_limit, - rpc_state.tx_sender.vm_concurrency_limiter(), - rpc_state.tx_sender.storage_caches(), - ) - .await; + let debug_ns = DebugNamespace::new(rpc_state).await; io.extend_with(debug_ns.to_delegate()); } } async fn build_jsonrpc_ws( - self, + mut self, addr: SocketAddr, mut stop_receiver: watch::Receiver, health_updater: HealthUpdater, - ) -> Vec> { - if self.batch_request_size_limit.is_some() { - vlog::info!("`batch_request_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); - } + ) -> Vec>> { if self.response_body_size_limit.is_some() { - vlog::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); + tracing::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); } + let websocket_requests_per_second_limit = self.websocket_requests_per_minute_limit; + + let batch_limiter_middleware = + LimitMiddleware::new(Transport::Ws, self.batch_request_size_limit); let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .thread_name("jsonrpc-ws-worker") .worker_threads(self.threads.unwrap()) .build() - .unwrap(); + .unwrap(); // Constructing a runtime should always succeed. 
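// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): the shutdown pattern shared by these
// server builders, in isolation: a dedicated thread owns a multi-threaded
// runtime, and a `watch` channel tells the server loop to stop. All names here
// are illustrative; the real builders additionally close the VM barrier and
// the server's close handle.
use std::time::Duration;

use tokio::sync::watch;

fn spawn_server(mut stop_receiver: watch::Receiver<bool>) -> std::thread::JoinHandle<()> {
    std::thread::spawn(move || {
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .worker_threads(2)
            .build()
            .unwrap(); // Constructing a runtime should always succeed.

        runtime.block_on(async move {
            loop {
                tokio::select! {
                    _ = stop_receiver.changed() => {
                        println!("Stop signal received, shutting down");
                        break;
                    }
                    _ = tokio::time::sleep(Duration::from_millis(50)) => {
                        // ...serve requests here...
                    }
                }
            }
        });
        // Give in-flight tasks a grace period, mirroring GRACEFUL_SHUTDOWN_TIMEOUT.
        runtime.shutdown_timeout(Duration::from_secs(5));
    })
}

fn main() {
    let (stop_sender, stop_receiver) = watch::channel(false);
    let server_thread = spawn_server(stop_receiver);
    std::thread::sleep(Duration::from_millis(200));
    stop_sender.send(true).unwrap();
    server_thread.join().unwrap();
}
// ----------------------------------------------------------------------------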
+ let max_connections = self.subscriptions_limit.unwrap_or(usize::MAX); + let vm_barrier = self.vm_barrier.take().unwrap(); - let mut io_handler = PubSubHandler::default(); + let io_handler: MetaIoHandler>, _> = + MetaIoHandler::with_middleware(batch_limiter_middleware); + let mut io_handler = PubSubHandler::new(io_handler); let mut notify_handles = Vec::new(); if self @@ -536,29 +540,27 @@ impl ApiBuilder { ]); io_handler.extend_with(pub_sub.to_delegate()); } - self.extend_jsonrpc_methods(&mut io_handler).await; - let max_connections = self.subscriptions_limit.unwrap_or(usize::MAX); - let vm_barrier = self.vm_barrier.unwrap(); let server_handle = tokio::task::spawn_blocking(move || { let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( io_handler, - |context: &jsonrpc_ws_server::RequestContext| { - Arc::new(jsonrpc_pubsub::Session::new(context.sender())) + move |context: &jsonrpc_ws_server::RequestContext| { + let session = Arc::new(jsonrpc_pubsub::Session::new(context.sender())); + RateLimitMetadata::new(websocket_requests_per_second_limit, session) }, ) .event_loop_executor(runtime.handle().clone()) .max_connections(max_connections) .session_stats(TrackOpenWsConnections) .start(&addr) - .unwrap(); + .context("jsonrpc_ws_server::Server::start()")?; let close_handle = server.close_handle(); let closing_vm_barrier = vm_barrier.clone(); runtime.handle().spawn(async move { if stop_receiver.changed().await.is_ok() { - vlog::info!("Stop signal received, WS JSON-RPC server is shutting down"); + tracing::info!("Stop signal received, WS JSON-RPC server is shutting down"); closing_vm_barrier.close(); close_handle.close(); } @@ -567,29 +569,44 @@ impl ApiBuilder { health_updater.update(HealthStatus::Ready.into()); server.wait().unwrap(); drop(health_updater); - vlog::info!("WS JSON-RPC server stopped"); + tracing::info!("WS JSON-RPC server stopped"); runtime.block_on(Self::wait_for_vm(vm_barrier, "WS")); runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); + Ok(()) }); notify_handles.push(server_handle); notify_handles } - async fn build_jsonrpsee_http( - self, - addr: SocketAddr, + async fn build_jsonrpsee( + mut self, + transport: ApiTransport, stop_receiver: watch::Receiver, health_updater: HealthUpdater, - ) -> tokio::task::JoinHandle<()> { - let rpc = self.build_rpc_module().await; + ) -> tokio::task::JoinHandle> { + if matches!(transport, ApiTransport::WebSocket(_)) { + // TODO (SMA-1588): Implement `eth_subscribe` method for `jsonrpsee`. 
+            tracing::warn!(
+                "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead"
+            );
+
+            if self.websocket_requests_per_minute_limit.is_some() {
+                tracing::info!("`websocket_requests_per_minute_limit` is not supported for `jsonrpsee` backend, this value is ignored");
+            }
+        }
+
+        let runtime_thread_name = match transport {
+            ApiTransport::Http(_) => "jsonrpsee-http-worker",
+            ApiTransport::WebSocket(_) => "jsonrpsee-ws-worker",
+        };
         let runtime = tokio::runtime::Builder::new_multi_thread()
             .enable_all()
-            .thread_name("jsonrpsee-http-worker")
+            .thread_name(runtime_thread_name)
             .worker_threads(self.threads.unwrap())
             .build()
             .unwrap();
-        let vm_barrier = self.vm_barrier.unwrap();
+        let vm_barrier = self.vm_barrier.take().unwrap();

         let batch_request_config = if let Some(limit) = self.batch_request_size_limit {
             BatchRequestConfig::Limit(limit as u32)
         } else {
@@ -600,12 +617,13 @@ impl ApiBuilder {
             .map(|limit| limit as u32)
             .unwrap_or(u32::MAX);

+        let rpc = self.build_rpc_module().await;
+
         // Start the server in a separate tokio runtime from a dedicated thread.
         tokio::task::spawn_blocking(move || {
-            runtime.block_on(Self::run_jsonrpsee_server(
-                true,
+            let res = runtime.block_on(Self::run_jsonrpsee_server(
                 rpc,
-                addr,
+                transport,
                 stop_receiver,
                 health_updater,
                 vm_barrier,
@@ -613,21 +631,25 @@ impl ApiBuilder {
                 response_body_size_limit,
             ));
             runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT);
+            res
         })
     }

     #[allow(clippy::too_many_arguments)]
     async fn run_jsonrpsee_server(
-        is_http: bool,
         rpc: RpcModule<()>,
-        addr: SocketAddr,
+        transport: ApiTransport,
         mut stop_receiver: watch::Receiver<bool>,
         health_updater: HealthUpdater,
         vm_barrier: VmConcurrencyBarrier,
         batch_request_config: BatchRequestConfig,
         response_body_size_limit: u32,
-    ) {
-        let transport = if is_http { "HTTP" } else { "WS" };
+    ) -> anyhow::Result<()> {
+        let (transport_str, is_http, addr) = match transport {
+            ApiTransport::Http(addr) => ("HTTP", true, addr),
+            ApiTransport::WebSocket(addr) => ("WS", false, addr),
+        };
+
         // Setup CORS.
         let cors = is_http.then(|| {
             CorsLayer::new()
@@ -639,8 +661,8 @@ impl ApiBuilder {
         });
         // Setup metrics for the number of in-flight requests.
         let (in_flight_requests, counter) = InFlightRequestsLayer::pair();
-        tokio::spawn(counter.run_emitter(Duration::from_secs(10), move |count| {
-            metrics::histogram!("api.web3.in_flight_requests", count as f64, "scheme" => transport);
+        tokio::spawn(counter.run_emitter(Duration::from_millis(100), move |count| {
+            metrics::histogram!("api.web3.in_flight_requests", count as f64, "scheme" => transport_str);
             future::ready(())
         }));
         // Assemble server middleware.
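// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): how the in-flight requests metric
// above is wired, as a standalone sketch. `InFlightRequestsLayer::pair()`
// yields a tower layer plus a counter handle; `run_emitter` invokes the given
// callback with the current count at the configured interval (100 ms in the
// diff, feeding a `metrics` histogram instead of `println!`).
use std::time::Duration;

use tower_http::metrics::InFlightRequestsLayer;

#[tokio::main]
async fn main() {
    let (in_flight_layer, counter) = InFlightRequestsLayer::pair();

    tokio::spawn(counter.run_emitter(Duration::from_millis(100), |count| async move {
        println!("in-flight requests: {count}");
    }));

    // The layer half is what gets added to the server's middleware stack,
    // e.g. via `tower::ServiceBuilder`.
    let _middleware = tower::ServiceBuilder::new().layer(in_flight_layer);
}
// ----------------------------------------------------------------------------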
@@ -660,16 +682,16 @@ impl ApiBuilder { .max_response_body_size(response_body_size_limit) .build(addr) .await - .unwrap_or_else(|err| { - panic!("Failed building {} JSON-RPC server: {}", transport, err); - }); + .with_context(|| format!("Failed building {transport_str} JSON-RPC server"))?; let server_handle = server.start(rpc); let close_handle = server_handle.clone(); let closing_vm_barrier = vm_barrier.clone(); tokio::spawn(async move { if stop_receiver.changed().await.is_ok() { - vlog::info!("Stop signal received, {transport} JSON-RPC server is shutting down"); + tracing::info!( + "Stop signal received, {transport_str} JSON-RPC server is shutting down" + ); closing_vm_barrier.close(); close_handle.stop().ok(); } @@ -678,53 +700,9 @@ impl ApiBuilder { server_handle.stopped().await; drop(health_updater); - vlog::info!("{transport} JSON-RPC server stopped"); - Self::wait_for_vm(vm_barrier, transport).await; - } - - async fn build_jsonrpsee_ws( - self, - addr: SocketAddr, - stop_receiver: watch::Receiver, - health_updater: HealthUpdater, - ) -> tokio::task::JoinHandle<()> { - vlog::warn!( - "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead" - ); - - let rpc = self.build_rpc_module().await; - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name("jsonrpsee-ws-worker") - .worker_threads(self.threads.unwrap()) - .build() - .unwrap(); - let vm_barrier = self.vm_barrier.unwrap(); - - let batch_request_config = if let Some(limit) = self.batch_request_size_limit { - BatchRequestConfig::Limit(limit as u32) - } else { - BatchRequestConfig::Unlimited - }; - let response_body_size_limit = self - .response_body_size_limit - .map(|limit| limit as u32) - .unwrap_or(u32::MAX); - - // Start the server in a separate tokio runtime from a dedicated thread. 
- tokio::task::spawn_blocking(move || { - runtime.block_on(Self::run_jsonrpsee_server( - false, - rpc, - addr, - stop_receiver, - health_updater, - vm_barrier, - batch_request_config, - response_body_size_limit, - )); - runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); - }) + tracing::info!("{transport_str} JSON-RPC server stopped"); + Self::wait_for_vm(vm_barrier, transport_str).await; + Ok(()) } } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs index da1af8238315..9ca717f0dcf4 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -1,64 +1,60 @@ +use once_cell::sync::OnceCell; use std::{sync::Arc, time::Instant}; +use vm::constants::BLOCK_GAS_LIMIT; + +use vm::ExecutionResult; -use zksync_contracts::{ - BaseSystemContracts, BaseSystemContractsHashes, PLAYGROUND_BLOCK_BOOTLOADER_CODE, -}; use zksync_dal::ConnectionPool; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, l2::L2Tx, transaction_request::CallRequest, - vm_trace::{Call, VmTrace}, - AccountTreeId, H256, USED_BOOTLOADER_MEMORY_BYTES, + vm_trace::Call, + AccountTreeId, L2ChainId, H256, USED_BOOTLOADER_MEMORY_BYTES, }; use zksync_web3_decl::error::Web3Error; +use super::report_latency_with_block_id_and_diff; use crate::api_server::{ - execution_sandbox::{execute_tx_eth_call, BlockArgs, TxSharedArgs, VmConcurrencyLimiter}, - tx_sender::SubmitTxError, - web3::{backend_jsonrpc::error::internal_error, resolve_block}, + execution_sandbox::{ + execute_tx_eth_call, ApiTracer, BlockArgs, TxSharedArgs, VmConcurrencyLimiter, + }, + tx_sender::ApiContracts, + web3::{ + backend_jsonrpc::error::internal_error, + resolve_block, + state::{RpcState, SealedMiniblockNumber}, + }, }; +use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug, Clone)] pub struct DebugNamespace { connection_pool: ConnectionPool, fair_l2_gas_price: u64, - base_system_contracts: BaseSystemContracts, + api_contracts: ApiContracts, vm_execution_cache_misses_limit: Option, vm_concurrency_limiter: Arc, storage_caches: PostgresStorageCaches, + last_sealed_miniblock: SealedMiniblockNumber, + chain_id: L2ChainId, } impl DebugNamespace { - pub async fn new( - connection_pool: ConnectionPool, - base_system_contract_hashes: BaseSystemContractsHashes, - fair_l2_gas_price: u64, - vm_execution_cache_misses_limit: Option, - vm_concurrency_limiter: Arc, - storage_caches: PostgresStorageCaches, - ) -> Self { - let mut storage = connection_pool.access_storage_tagged("api").await; - - let mut base_system_contracts = storage - .storage_dal() - .get_base_system_contracts( - base_system_contract_hashes.bootloader, - base_system_contract_hashes.default_aa, - ) - .await; - - drop(storage); + pub async fn new(state: RpcState) -> Self { + let sender_config = &state.tx_sender.0.sender_config; - base_system_contracts.bootloader = PLAYGROUND_BLOCK_BOOTLOADER_CODE.clone(); + let api_contracts = ApiContracts::load_from_disk(); Self { - connection_pool, - fair_l2_gas_price, - base_system_contracts, - vm_execution_cache_misses_limit, - vm_concurrency_limiter, - storage_caches, + connection_pool: state.connection_pool, + fair_l2_gas_price: sender_config.fair_l2_gas_price, + api_contracts, + vm_execution_cache_misses_limit: sender_config.vm_execution_cache_misses_limit, + vm_concurrency_limiter: state.tx_sender.vm_concurrency_limiter(), + 
storage_caches: state.tx_sender.storage_caches(),
+            last_sealed_miniblock: state.last_sealed_miniblock,
+            chain_id: sender_config.chain_id,
         }
     }

@@ -80,10 +76,7 @@ impl DebugNamespace {
             .blocks_web3_dal()
             .get_trace_for_miniblock(block_number)
             .await;
-
-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block_id.extract_block_tag());
-
-        Ok(call_trace
+        let call_trace = call_trace
             .into_iter()
             .map(|call_trace| {
                 let mut result: DebugCall = call_trace.into();
@@ -92,7 +85,11 @@
                 }
                 ResultDebugCall { result }
             })
-            .collect())
+            .collect();
+
+        let block_diff = self.last_sealed_miniblock.diff(block_number);
+        report_latency_with_block_id_and_diff(METHOD_NAME, start, block_id, block_diff);
+        Ok(call_trace)
     }

     #[tracing::instrument(skip(self))]
@@ -133,9 +130,9 @@
             .map(|options| options.tracer_config.only_top_call)
             .unwrap_or(false);

-        let block = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending));
+        let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending));
         let mut connection = self.connection_pool.access_storage_tagged("api").await;
-        let block_args = BlockArgs::new(&mut connection, block)
+        let block_args = BlockArgs::new(&mut connection, block_id)
             .await
             .map_err(|err| internal_error("debug_trace_call", err))?
             .ok_or(Web3Error::NoBlock)?;
@@ -148,6 +145,13 @@
         let vm_permit = vm_permit.ok_or(Web3Error::InternalError)?;

         // We don't need a full trace if we only need the top-level call
+        let call_tracer_result = Arc::new(OnceCell::default());
+        let custom_tracers = if only_top_call {
+            vec![]
+        } else {
+            vec![ApiTracer::CallTracer(call_tracer_result.clone())]
+        };
+
         let result = execute_tx_eth_call(
             vm_permit,
             shared_args,
@@ -155,32 +159,29 @@
             tx.clone(),
             block_args,
             self.vm_execution_cache_misses_limit,
-            !only_top_call,
+            custom_tracers,
         )
-        .await
-        .map_err(|err| {
-            let submit_tx_error = SubmitTxError::from(err);
-            Web3Error::SubmitTransactionError(submit_tx_error.to_string(), submit_tx_error.data())
-        })?;
-
-        let (output, revert_reason) = match result.revert_reason {
-            Some(result) => (vec![], Some(result.revert_reason.to_string())),
-            None => (
-                result
-                    .return_data
-                    .into_iter()
-                    .flat_map(<[u8; 32]>::from)
-                    .collect(),
-                None,
-            ),
-        };
-        let trace = match result.trace {
-            VmTrace::CallTrace(trace) => trace,
-            VmTrace::ExecutionTrace(_) => vec![],
+        .await;
+
+        let (output, revert_reason) = match result.result {
+            ExecutionResult::Success { output, ..
} => (output, None),
+            ExecutionResult::Revert { output } => (vec![], Some(output.to_string())),
+            ExecutionResult::Halt { reason } => {
+                return Err(Web3Error::SubmitTransactionError(
+                    reason.to_string(),
+                    vec![],
+                ))
+            }
         };
+
+        // The tracer's clone of the `Arc` has already been dropped, so we hold the only
+        // copy and it's safe to unwrap.
+        let trace = Arc::try_unwrap(call_tracer_result)
+            .unwrap()
+            .take()
+            .unwrap_or_default();
         let call = Call::new_high_level(
-            u32::MAX,
-            result.gas_used,
+            tx.common_data.fee.gas_limit.as_u32(),
+            result.statistics.gas_used,
             tx.execute.value,
             tx.execute.calldata,
             output,
@@ -188,8 +189,8 @@
             trace,
         );

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag());
-
+        let block_diff = self.last_sealed_miniblock.diff_with_block_args(&block_args);
+        report_latency_with_block_id_and_diff(METHOD_NAME, start, block_id, block_diff);
         Ok(call.into())
     }

@@ -198,8 +199,10 @@
             operator_account: AccountTreeId::default(),
             l1_gas_price: 100_000,
             fair_l2_gas_price: self.fair_l2_gas_price,
-            base_system_contracts: self.base_system_contracts.clone(),
+            base_system_contracts: self.api_contracts.eth_call.clone(),
             caches: self.storage_caches.clone(),
+            validation_computational_gas_limit: BLOCK_GAS_LIMIT,
+            chain_id: self.chain_id,
         }
     }
 }
diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs
index 16f9a57daad4..dfd96f9abad7 100644
--- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs
+++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs
@@ -1,12 +1,5 @@
 use std::time::Instant;

-use crate::{
-    api_server::{
-        execution_sandbox::BlockArgs,
-        web3::{backend_jsonrpc::error::internal_error, resolve_block, state::RpcState},
-    },
-    l1_gas_price::L1GasPriceProvider,
-};
 use zksync_types::{
     api::{
         BlockId, BlockNumber, GetLogsFilter, Transaction, TransactionId, TransactionReceipt,
@@ -26,6 +19,15 @@ use zksync_web3_decl::{
     types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64},
 };

+use super::report_latency_with_block_id_and_diff;
+use crate::{
+    api_server::{
+        execution_sandbox::BlockArgs,
+        web3::{backend_jsonrpc::error::internal_error, resolve_block, state::RpcState},
+    },
+    l1_gas_price::L1GasPriceProvider,
+};
+
 pub const EVENT_TOPIC_NUMBER_LIMIT: usize = 4;
 pub const PROTOCOL_VERSION: &str = "zks/1";

@@ -94,7 +96,11 @@ impl EthNamespace {
         let res_bytes = call_result
             .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?;

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag());
+        let block_diff = self
+            .state
+            .last_sealed_miniblock
+            .diff_with_block_args(&block_args);
+        report_latency_with_block_id_and_diff(METHOD_NAME, start, block, block_diff);

         Ok(res_bytes.into())
     }

@@ -192,14 +198,28 @@
             )
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?;
-
-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag());
+        self.report_latency_with_block_id(METHOD_NAME, start, block, block_number);
         Ok(balance)
     }

+    fn report_latency_with_block_id(
+        &self,
+        method_name: &'static str,
+        start: Instant,
+        block: BlockId,
+        block_number: MiniblockNumber,
+    ) {
+        let block_diff = self.state.last_sealed_miniblock.diff(block_number);
+        report_latency_with_block_id_and_diff(method_name, start, block, block_diff);
+    }
+
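// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): the `Arc<OnceCell<_>>` hand-off used
// by the call tracer in `debug_trace_call` above, reduced to its essentials.
// The tracer (simulated here by a closure) fills the cell through its own
// clone of the `Arc`; once that clone is dropped, the consumer holds the only
// strong reference and `Arc::try_unwrap` is guaranteed to succeed.
use std::sync::Arc;

use once_cell::sync::OnceCell;

fn main() {
    let result_cell: Arc<OnceCell<Vec<u32>>> = Arc::new(OnceCell::new());

    // The "tracer" publishes its result exactly once and drops its clone.
    let tracer_handle = Arc::clone(&result_cell);
    (move || {
        tracer_handle.set(vec![1, 2, 3]).expect("set only once");
    })();

    // We now hold the only copy of the `Arc`, so unwrapping cannot fail.
    let trace = Arc::try_unwrap(result_cell)
        .expect("no other Arc clones remain")
        .take()
        .unwrap_or_default();
    assert_eq!(trace, vec![1, 2, 3]);
}
// ----------------------------------------------------------------------------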
#[tracing::instrument(skip(self, filter))]
     pub async fn get_logs_impl(&self, mut filter: Filter) -> Result<Vec<Log>, Web3Error> {
+        if self.state.logs_translator_enabled {
+            return self.state.translate_get_logs(filter).await;
+        }
+
         let start = Instant::now();
         self.state.resolve_filter_block_hash(&mut filter).await?;
@@ -270,8 +290,17 @@
             .await
             .map_err(|err| internal_error(method_name, err));

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name, "block_id" => block_id.extract_block_tag());
-
+        if let Ok(Some(block)) = &block {
+            let block_number = MiniblockNumber(block.number.as_u32());
+            self.report_latency_with_block_id(method_name, start, block_id, block_number);
+        } else {
+            metrics::histogram!(
+                "api.web3.call",
+                start.elapsed(),
+                "method" => method_name,
+                "block_id" => block_id.extract_block_tag()
+            );
+        }
         block
     }

@@ -293,9 +322,17 @@
             .await
             .map_err(|err| internal_error(METHOD_NAME, err));

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block_id.extract_block_tag());
-
-        tx_count
+        if let Ok(Some((block_number, _))) = &tx_count {
+            self.report_latency_with_block_id(METHOD_NAME, start, block_id, *block_number);
+        } else {
+            metrics::histogram!(
+                "api.web3.call",
+                start.elapsed(),
+                "method" => METHOD_NAME,
+                "block_id" => block_id.extract_block_tag(),
+            );
+        }
+        Ok(tx_count?.map(|(_, count)| count))
     }

     #[tracing::instrument(skip(self))]
@@ -320,8 +357,7 @@ impl EthNamespace {
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?;

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag());
-
+        self.report_latency_with_block_id(METHOD_NAME, start, block, block_number);
         Ok(contract_code.unwrap_or_default().into())
     }

@@ -354,8 +390,7 @@ impl EthNamespace {
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?;

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => block.extract_block_tag());
-
+        self.report_latency_with_block_id(METHOD_NAME, start, block, block_number);
         Ok(value)
     }

@@ -380,26 +415,34 @@ impl EthNamespace {
             .access_storage_tagged("api")
             .await;

-        let full_nonce = match block {
-            BlockId::Number(BlockNumber::Pending) => connection
-                .transactions_web3_dal()
-                .next_nonce_by_initiator_account(address)
-                .await
-                .map_err(|err| internal_error(method_name, err)),
+        let (full_nonce, block_number) = match block {
+            BlockId::Number(BlockNumber::Pending) => {
+                let nonce = connection
+                    .transactions_web3_dal()
+                    .next_nonce_by_initiator_account(address)
+                    .await
+                    .map_err(|err| internal_error(method_name, err));
+                (nonce, None)
+            }
             _ => {
                 let block_number = resolve_block(&mut connection, block, method_name).await?;
-                connection
+                let nonce = connection
                     .storage_web3_dal()
                     .get_address_historical_nonce(address, block_number)
                     .await
-                    .map_err(|err| internal_error(method_name, err))
+                    .map_err(|err| internal_error(method_name, err));
+                (nonce, Some(block_number))
             }
         };

+        // TODO (SMA-1612): currently the account nonce is always returned; later, we will return
+        // the account nonce for account-abstraction accounts and the deployment nonce for
+        // non-account-abstraction ones.
+        // Strip off deployer nonce part.
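// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): what "stripping the deployer nonce"
// means. zkSync packs two counters into one U256 "full nonce"; assuming the
// layout used by `decompose_full_nonce` (deployment nonce in the upper 128
// bits, i.e. full = account_nonce + deployment_nonce * 2^128), the account
// part is recovered as below. `decompose` is a local stand-in for the helper.
use zksync_types::U256;

fn decompose(full_nonce: U256) -> (U256, U256) {
    let increment = U256::one() << 128; // One "deployment nonce" step.
    (full_nonce % increment, full_nonce / increment)
}

fn main() {
    let full = U256::from(7) + U256::from(3) * (U256::one() << 128);
    // (account nonce, deployment nonce)
    assert_eq!(decompose(full), (U256::from(7), U256::from(3)));
}
// With that in mind, `decompose_full_nonce(nonce).0` below keeps only the
// account part of the nonce.
// ----------------------------------------------------------------------------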
let account_nonce = full_nonce.map(|nonce| decompose_full_nonce(nonce).0); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name, "block_id" => block.extract_block_tag()); - + let block_diff = + block_number.map_or(0, |number| self.state.last_sealed_miniblock.diff(number)); + report_latency_with_block_id_and_diff(method_name, start, block, block_diff); account_nonce } @@ -606,6 +649,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn protocol_version(&self) -> String { + // TODO (SMA-838): Versioning of our protocol PROTOCOL_VERSION.to_string() } @@ -617,7 +661,7 @@ impl EthNamespace { let submit_result = self.state.tx_sender.submit_tx(tx).await; let submit_result = submit_result.map(|_| hash).map_err(|err| { - vlog::debug!("Send raw transaction error: {err}"); + tracing::debug!("Send raw transaction error: {err}"); metrics::counter!( "api.submit_tx_error", 1, @@ -659,7 +703,7 @@ impl EthNamespace { &self, block_count: U64, newest_block: BlockNumber, - _reward_percentiles: Vec, + reward_percentiles: Vec, ) -> Result { const METHOD_NAME: &str = "fee_history"; @@ -690,14 +734,21 @@ impl EthNamespace { let oldest_block = newest_miniblock.0 + 1 - base_fee_per_gas.len() as u32; // We do not store gas used ratio for blocks, returns array of zeroes as a placeholder. let gas_used_ratio = vec![0.0; base_fee_per_gas.len()]; - // Effective priority gas price is currently 0, returns `reward: null` as a placeholder. - let reward = None; + // Effective priority gas price is currently 0. + let reward = Some(vec![ + vec![U256::zero(); reward_percentiles.len()]; + base_fee_per_gas.len() + ]); // `base_fee_per_gas` for next miniblock cannot be calculated, appending last fee as a placeholder. base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME, "block_id" => newest_block.to_string()); - + self.report_latency_with_block_id( + METHOD_NAME, + start, + BlockId::Number(newest_block), + newest_miniblock, + ); Ok(FeeHistory { oldest_block: web3::types::BlockNumber::Number(oldest_block.into()), base_fee_per_gas, @@ -790,7 +841,7 @@ impl EthNamespace { if let Some(miniblock_number) = storage .events_web3_dal() .get_log_block_number( - get_logs_filter.clone(), + &get_logs_filter, self.state.api_config.req_entities_limit, ) .await diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs index 08223a7d4cdd..5ece2f9eae9a 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs @@ -1,14 +1,13 @@ //! Actual implementation of Web3 API namespaces logic, not tied to the backend //! used to create a JSON RPC server. -use num::{rational::Ratio, BigUint}; +use std::time::Instant; -use zksync_types::U256; -use zksync_utils::{biguint_to_u256, u256_to_biguint}; +use zksync_types::api; mod debug; mod en; -mod eth; +pub(crate) mod eth; mod eth_subscribe; mod net; mod web3; @@ -24,8 +23,30 @@ pub use self::{ zks::ZksNamespace, }; -pub fn scale_u256(val: U256, scale_factor: &Ratio) -> U256 { - let val_as_ratio = &Ratio::from_integer(u256_to_biguint(val)); - let result = (val_as_ratio * scale_factor).ceil(); - biguint_to_u256(result.to_integer()) +/// Helper to report latency of an RPC method that takes `BlockId` as an argument. Used by `eth` +/// and `debug` namespaces. 
+fn report_latency_with_block_id_and_diff( + method_name: &'static str, + start: Instant, + block_id: api::BlockId, + block_diff: u32, +) { + let block_diff_label = match block_diff { + 0 => "0", + 1 => "1", + 2 => "2", + 3..=9 => "<10", + 10..=99 => "<100", + 100..=999 => "<1000", + _ => ">=1000", + }; + metrics::histogram!( + "api.web3.call", + start.elapsed(), + "method" => method_name, + "block_id" => block_id.extract_block_tag(), + "block_diff" => block_diff_label + ); + + metrics::histogram!("api.web3.call.block_diff", block_diff as f64, "method" => method_name); } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs index 0c8905489017..acf5e5848b1e 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,11 +1,8 @@ -use std::time::Instant; -use std::{collections::HashMap, convert::TryInto}; +use std::{collections::HashMap, convert::TryInto, time::Instant}; use bigdecimal::{BigDecimal, Zero}; use zksync_mini_merkle_tree::MiniMerkleTree; - -use zksync_types::l2::L2Tx; use zksync_types::{ api::{ BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, @@ -14,6 +11,7 @@ use zksync_types::{ commitment::SerializeCommitment, fee::Fee, l1::L1Tx, + l2::L2Tx, l2_to_l1_log::L2ToL1Log, tokens::ETHEREUM_ADDRESS, transaction_request::CallRequest, @@ -23,12 +21,11 @@ use zksync_types::{ use zksync_utils::address_to_h256; use zksync_web3_decl::{ error::Web3Error, - types::{Address, Token, H256}, + types::{Address, Filter, Log, Token, H256}, }; use crate::api_server::web3::{backend_jsonrpc::error::internal_error, RpcState}; -use crate::fee_ticker::FeeTicker; -use crate::fee_ticker::{error::TickerError, TokenPriceRequestType}; +use crate::fee_ticker::{error::TickerError, FeeTicker, TokenPriceRequestType}; use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug)] @@ -596,4 +593,12 @@ impl ZksNamespace { protocol_version } + + #[tracing::instrument(skip_all)] + pub async fn get_logs_with_virtual_blocks_impl( + &self, + filter: Filter, + ) -> Result, Web3Error> { + self.state.translate_get_logs(filter).await + } } diff --git a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs index f0b7a29cf5e8..b4d268bfedff 100644 --- a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs +++ b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs @@ -1,3 +1,4 @@ +use anyhow::Context as _; use jsonrpc_pubsub::typed; use tokio::sync::watch; use tokio::time::{interval, Duration, Instant}; @@ -13,18 +14,18 @@ pub async fn notify_blocks( connection_pool: ConnectionPool, polling_interval: Duration, stop_receiver: watch::Receiver, -) { +) -> anyhow::Result<()> { let mut last_block_number = connection_pool .access_storage_tagged("api") .await .blocks_web3_dal() .get_sealed_miniblock_number() .await - .unwrap(); + .context("get_sealed_miniblock_number()")?; let mut timer = interval(polling_interval); loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, pubsub_block_notifier is shutting down"); + tracing::info!("Stop signal received, pubsub_block_notifier is shutting down"); break; } @@ -37,7 +38,7 @@ pub async fn notify_blocks( .blocks_web3_dal() .get_block_headers_after(last_block_number) .await - .unwrap(); + .with_context(|| format!("get_block_headers_after({last_block_number})"))?; 
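// ----------------------------------------------------------------------------
// Editorial aside (not part of the diff): the `anyhow::Context` pattern these
// notifier loops now use instead of `unwrap()`. `context`/`with_context`
// attach a human-readable breadcrumb while converting the error into
// `anyhow::Error`, which is what lets the loops return `anyhow::Result<()>`.
// The parsing calls merely stand in for the DB queries.
use anyhow::Context as _;

fn run_notifier_step() -> anyhow::Result<()> {
    let last_block: u32 = "42".parse().context("get_sealed_miniblock_number()")?;
    let _new_blocks: u32 = "not-a-number"
        .parse()
        .with_context(|| format!("get_block_headers_after({last_block})"))?;
    Ok(())
}

fn main() {
    // Prints the context first, followed by the underlying parse error.
    if let Err(err) = run_notifier_step() {
        println!("{err:#}");
    }
}
// ----------------------------------------------------------------------------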
metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "blocks"); if !new_blocks.is_empty() { last_block_number = @@ -62,6 +63,7 @@ pub async fn notify_blocks( metrics::histogram!("api.web3.pubsub.notify_subscribers_latency", start.elapsed(), "subscription_type" => "blocks"); } } + Ok(()) } pub async fn notify_txs( @@ -69,12 +71,12 @@ pub async fn notify_txs( connection_pool: ConnectionPool, polling_interval: Duration, stop_receiver: watch::Receiver, -) { +) -> anyhow::Result<()> { let mut last_time = chrono::Utc::now().naive_utc(); let mut timer = interval(polling_interval); loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, pubsub_tx_notifier is shutting down"); + tracing::info!("Stop signal received, pubsub_tx_notifier is shutting down"); break; } @@ -87,7 +89,7 @@ pub async fn notify_txs( .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) .await - .unwrap(); + .context("get_pending_txs_hashes_after()")?; metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "txs"); if let Some(new_last_time) = new_last_time { last_time = new_last_time; @@ -111,6 +113,7 @@ pub async fn notify_txs( metrics::histogram!("api.web3.pubsub.notify_subscribers_latency", start.elapsed(), "subscription_type" => "txs"); } } + Ok(()) } pub async fn notify_logs( @@ -118,18 +121,18 @@ pub async fn notify_logs( connection_pool: ConnectionPool, polling_interval: Duration, stop_receiver: watch::Receiver, -) { +) -> anyhow::Result<()> { let mut last_block_number = connection_pool .access_storage_tagged("api") .await .blocks_web3_dal() .get_sealed_miniblock_number() .await - .unwrap(); + .context("get_sealed_miniblock_number()")?; let mut timer = interval(polling_interval); loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, pubsub_logs_notifier is shutting down"); + tracing::info!("Stop signal received, pubsub_logs_notifier is shutting down"); break; } @@ -142,7 +145,7 @@ pub async fn notify_logs( .events_web3_dal() .get_all_logs(last_block_number) .await - .unwrap(); + .context("events_web3_dal().get_all_logs()")?; metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "logs"); if !new_logs.is_empty() { last_block_number = @@ -170,4 +173,5 @@ pub async fn notify_logs( metrics::histogram!("api.web3.pubsub.notify_subscribers_latency", start.elapsed(), "subscription_type" => "logs"); } } + Ok(()) } diff --git a/core/bin/zksync_core/src/api_server/web3/state.rs b/core/bin/zksync_core/src/api_server/web3/state.rs index 63d28621968f..28b67430eda1 100644 --- a/core/bin/zksync_core/src/api_server/web3/state.rs +++ b/core/bin/zksync_core/src/api_server/web3/state.rs @@ -1,22 +1,42 @@ -use std::collections::HashMap; -use std::sync::Arc; - use tokio::sync::RwLock; -use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; +use zksync_utils::h256_to_u256; -use crate::api_server::tx_sender::TxSender; -use crate::api_server::web3::{backend_jsonrpc::error::internal_error, resolve_block}; -use crate::sync_layer::SyncState; +use std::{ + collections::HashMap, + convert::TryFrom, + future::Future, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; +use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; use zksync_dal::ConnectionPool; - use zksync_types::{ - api, l2::L2Tx, transaction_request::CallRequest, Address, L1ChainId, L2ChainId, - 
-    MiniblockNumber, H256, U256, U64,
+    api::{self, BlockId, BlockNumber, GetLogsFilter},
+    block::unpack_block_upgrade_info,
+    l2::L2Tx,
+    transaction_request::CallRequest,
+    AccountTreeId, Address, L1BatchNumber, L1ChainId, L2ChainId, MiniblockNumber, StorageKey, H256,
+    SYSTEM_CONTEXT_ADDRESS, U256, U64, VIRTUIAL_BLOCK_UPGRADE_INFO_POSITION,
 };
 use zksync_web3_decl::{
     error::Web3Error,
-    types::{Filter, TypedFilter},
+    types::{Filter, Log, TypedFilter},
+};
+
+use crate::{
+    api_server::{
+        execution_sandbox::BlockArgs,
+        tx_sender::TxSender,
+        web3::{
+            backend_jsonrpc::error::internal_error, namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT,
+            resolve_block,
+        },
+    },
+    sync_layer::SyncState,
 };

 /// Configuration values for the API.
@@ -64,6 +84,83 @@ impl InternalApiConfig {
     }
 }

+/// Thread-safe updatable information about the last sealed miniblock number.
+///
+/// The information may be temporarily outdated and thus should only be used where this is OK
+/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]
+/// and on an interval specified when creating an instance.
+#[derive(Debug, Clone)]
+pub(crate) struct SealedMiniblockNumber(Arc<AtomicU32>);
+
+impl SealedMiniblockNumber {
+    /// Creates a handle to the last sealed miniblock number together with a task that will update
+    /// it on a schedule.
+    pub fn new(
+        connection_pool: ConnectionPool,
+        update_interval: Duration,
+    ) -> (Self, impl Future<Output = ()> + Send) {
+        let this = Self(Arc::default());
+        let number_updater = this.clone();
+        let update_task = async move {
+            loop {
+                if Arc::strong_count(&number_updater.0) == 1 {
+                    // The `sealed_miniblock_number` was dropped; there's no sense continuing updates.
+                    tracing::debug!("Stopping latest sealed miniblock updates");
+                    break;
+                }
+
+                let mut connection = connection_pool.access_storage_tagged("api").await;
+                let last_sealed_miniblock = connection
+                    .blocks_web3_dal()
+                    .get_sealed_miniblock_number()
+                    .await;
+                drop(connection);
+
+                match last_sealed_miniblock {
+                    Ok(number) => {
+                        number_updater.update(number);
+                    }
+                    Err(err) => tracing::warn!(
+                        "Failed fetching latest sealed miniblock to update the watch channel: {err}"
+                    ),
+                }
+                tokio::time::sleep(update_interval).await;
+            }
+        };
+
+        (this, update_task)
+    }
+
+    /// Potentially updates the last sealed miniblock number by comparing it to the provided
+    /// sealed miniblock number (not necessarily the last one).
+    ///
+    /// Returns the last sealed miniblock number after the update.
+    fn update(&self, maybe_newer_miniblock_number: MiniblockNumber) -> MiniblockNumber {
+        let prev_value = self
+            .0
+            .fetch_max(maybe_newer_miniblock_number.0, Ordering::Relaxed);
+        MiniblockNumber(prev_value).max(maybe_newer_miniblock_number)
+    }
+
+    pub fn diff(&self, miniblock_number: MiniblockNumber) -> u32 {
+        let sealed_miniblock_number = self.update(miniblock_number);
+        sealed_miniblock_number.0.saturating_sub(miniblock_number.0)
+    }
+
+    /// Returns the difference between the latest miniblock number and the resolved miniblock number
+    /// from `block_args`.
+    pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 {
+        // We compute the difference in any case, since it may update the stored value.
+        let diff = self.diff(block_args.resolved_block_number());
+
+        if block_args.resolves_to_latest_sealed_miniblock() {
+            0 // Overwrite potentially inaccurate value
+        } else {
+            diff
+        }
+    }
+}
+
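The `fetch_max` in `update()` is what lets `SealedMiniblockNumber` be shared between the background updater and concurrent API handlers without a lock: the stored value can only grow, so stale writers never move it backwards. The same pattern reduced to a self-contained sketch (names are illustrative):

use std::sync::atomic::{AtomicU32, Ordering};

/// Monotonically advances `shared` to `candidate` if it is newer and returns
/// the freshest value seen, mirroring `SealedMiniblockNumber::update()` above.
fn update(shared: &AtomicU32, candidate: u32) -> u32 {
    let prev = shared.fetch_max(candidate, Ordering::Relaxed);
    prev.max(candidate)
}

/// Lag of `miniblock` behind the last sealed miniblock, saturating at zero
/// for miniblocks newer than anything observed so far.
fn diff(shared: &AtomicU32, miniblock: u32) -> u32 {
    update(shared, miniblock).saturating_sub(miniblock)
}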
 /// Holder for the data required for the API to be functional.
 #[derive(Debug)]
 pub struct RpcState {
@@ -72,6 +169,10 @@ pub struct RpcState {
     pub tx_sender: TxSender,
     pub sync_state: Option<SyncState>,
     pub(super) api_config: InternalApiConfig,
+    pub(super) last_sealed_miniblock: SealedMiniblockNumber,
+    // Flag that redirects the `eth_getLogs` implementation to one that translates
+    // virtual blocks to miniblocks.
+    pub logs_translator_enabled: bool,
 }

 // Custom implementation is required due to generic param:
@@ -85,6 +186,8 @@ impl Clone for RpcState {
             tx_sender: self.tx_sender.clone(),
             sync_state: self.sync_state.clone(),
             api_config: self.api_config.clone(),
+            last_sealed_miniblock: self.last_sealed_miniblock.clone(),
+            logs_translator_enabled: self.logs_translator_enabled,
         }
     }
 }
@@ -210,6 +313,206 @@ impl RpcState {
         }
         Ok(())
     }
+
+    /// Returns logs for the given filter, taking the `block.number` migration with virtual blocks into account.
+    pub async fn translate_get_logs(&self, filter: Filter) -> Result<Vec<Log>, Web3Error> {
+        let start = Instant::now();
+        const METHOD_NAME: &str = "translate_get_logs";
+
+        // No support for block hash filtering.
+        if filter.block_hash.is_some() {
+            return Err(Web3Error::InvalidFilterBlockHash);
+        }
+
+        if let Some(topics) = &filter.topics {
+            if topics.len() > EVENT_TOPIC_NUMBER_LIMIT {
+                return Err(Web3Error::TooManyTopics);
+            }
+        }
+
+        let mut conn = self.connection_pool.access_storage_tagged("api").await;
+
+        // Get the virtual block upgrade info.
+        let upgrade_info = conn
+            .storage_dal()
+            .get_by_key(&StorageKey::new(
+                AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+                VIRTUIAL_BLOCK_UPGRADE_INFO_POSITION,
+            ))
+            .await
+            .ok_or_else(|| {
+                internal_error(
+                    METHOD_NAME,
+                    "Failed to get virtual block upgrade info from DB".to_string(),
+                )
+            })?;
+        let (virtual_block_start_batch, virtual_block_finish_l2_block) =
+            unpack_block_upgrade_info(h256_to_u256(upgrade_info));
+        let from_miniblock_number =
+            if let Some(BlockNumber::Number(block_number)) = filter.from_block {
+                self.resolve_miniblock_from_block(
+                    block_number.as_u64(),
+                    true,
+                    virtual_block_start_batch,
+                    virtual_block_finish_l2_block,
+                )
+                .await?
+            } else {
+                let block_number = filter.from_block.unwrap_or(BlockNumber::Latest);
+                let block_id = BlockId::Number(block_number);
+                conn.blocks_web3_dal()
+                    .resolve_block_id(block_id)
+                    .await
+                    .map_err(|err| internal_error(METHOD_NAME, err))?
+                    .unwrap()
+                    .0
+            };
+
+        let to_miniblock_number = if let Some(BlockNumber::Number(block_number)) = filter.to_block {
+            self.resolve_miniblock_from_block(
+                block_number.as_u64(),
+                false,
+                virtual_block_start_batch,
+                virtual_block_finish_l2_block,
+            )
+            .await?
+        } else {
+            let block_number = filter.to_block.unwrap_or(BlockNumber::Latest);
+            let block_id = BlockId::Number(block_number);
+            conn.blocks_web3_dal()
+                .resolve_block_id(block_id)
+                .await
+                .map_err(|err| internal_error(METHOD_NAME, err))?
+                .unwrap()
+                .0
+        };
+
+        // All logs of a miniblock are treated as created in the last virtual block of that
+        // miniblock. Hence, when the given virtual block range is a proper subrange of a single
+        // miniblock's virtual block range, no logs are returned.
+        // E.g., if the given virtual block range is [11, 12] and miniblock 5 covers virtual
+        // blocks [10, 14], then `to_miniblock_number` resolves to 4 and `from_miniblock_number`
+        // resolves to 5, and 4 < 5.
+        if to_miniblock_number < from_miniblock_number {
+            return Ok(vec![]);
+        }
+
+        let block_filter = Filter {
+            from_block: Some(from_miniblock_number.into()),
+            to_block: Some(to_miniblock_number.into()),
+            ..filter.clone()
+        };
+
+        let result = self
+            .filter_events_changes(
+                block_filter,
+                MiniblockNumber(from_miniblock_number),
+                MiniblockNumber(to_miniblock_number),
+            )
+            .await;
+
+        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME);
+
+        result
+    }
+
+    async fn resolve_miniblock_from_block(
+        &self,
+        block_number: u64,
+        is_from: bool,
+        virtual_block_start_batch: u64,
+        virtual_block_finish_l2_block: u64,
+    ) -> Result<u32, Web3Error> {
+        const METHOD_NAME: &str = "resolve_miniblock_from_block";
+
+        let mut conn = self.connection_pool.access_storage_tagged("api").await;
+
+        if block_number < virtual_block_start_batch {
+            let l1_batch = L1BatchNumber(block_number as u32);
+            let miniblock_range = conn
+                .blocks_web3_dal()
+                .get_miniblock_range_of_l1_batch(l1_batch)
+                .await
+                .map(|minmax| minmax.map(|(min, max)| (U64::from(min.0), U64::from(max.0))))
+                .map_err(|err| internal_error(METHOD_NAME, err))?;
+
+            match miniblock_range {
+                Some((batch_first_miniblock, batch_last_miniblock)) => {
+                    if is_from {
+                        Ok(batch_first_miniblock.as_u32())
+                    } else {
+                        Ok(batch_last_miniblock.as_u32())
+                    }
+                }
+                _ => Err(Web3Error::NoBlock),
+            }
+        } else if virtual_block_finish_l2_block > 0 && block_number >= virtual_block_finish_l2_block
+        {
+            u32::try_from(block_number).map_err(|_| Web3Error::NoBlock)
+        } else {
+            // We have to deal with virtual blocks here.
+            let virtual_block_miniblock = if is_from {
+                conn.blocks_web3_dal()
+                    .get_miniblock_for_virtual_block_from(virtual_block_start_batch, block_number)
+                    .await
+                    .map_err(|err| internal_error(METHOD_NAME, err))?
+            } else {
+                conn.blocks_web3_dal()
+                    .get_miniblock_for_virtual_block_to(virtual_block_start_batch, block_number)
+                    .await
+                    .map_err(|err| internal_error(METHOD_NAME, err))?
+            };
+            virtual_block_miniblock.ok_or(Web3Error::NoBlock)
+        }
+    }
+
+    async fn filter_events_changes(
+        &self,
+        filter: Filter,
+        from_block: MiniblockNumber,
+        to_block: MiniblockNumber,
+    ) -> Result<Vec<Log>, Web3Error> {
+        const METHOD_NAME: &str = "filter_events_changes";
+
+        let addresses: Vec<_> = filter
+            .address
+            .map_or_else(Vec::default, |address| address.0);
+        let topics: Vec<_> = filter
+            .topics
+            .into_iter()
+            .flatten()
+            .enumerate()
+            .filter_map(|(idx, topics)| topics.map(|topics| (idx as u32 + 1, topics.0)))
+            .collect();
+        let get_logs_filter = GetLogsFilter {
+            from_block,
+            to_block: filter.to_block,
+            addresses,
+            topics,
+        };
+
+        let mut storage = self.connection_pool.access_storage_tagged("api").await;
+
+        // Check if there is more than one block in the range and more than `req_entities_limit`
+        // logs satisfy the filter. In that case, return an error and suggest requesting logs
+        // with a smaller block range.
+        if from_block != to_block
+            && storage
+                .events_web3_dal()
+                .get_log_block_number(&get_logs_filter, self.api_config.req_entities_limit)
+                .await
+                .map_err(|err| internal_error(METHOD_NAME, err))?
+                .is_some()
+        {
+            return Err(Web3Error::TooManyLogs(self.api_config.req_entities_limit));
+        }

+        let logs = storage
+            .events_web3_dal()
+            .get_logs(get_logs_filter, i32::MAX as usize)
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+
+        Ok(logs)
+    }
 }

 /// Contains mapping from index to `Filter` with optional location.
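With `logs_translator_enabled` on `RpcState`, an `eth_getLogs` handler can route requests through `translate_get_logs` so that pre-migration (virtual) block numbers keep resolving correctly. A hypothetical dispatch sketch; the handler shape and the `regular_get_logs` fallback are illustrative, not part of this change:

async fn get_logs(state: &RpcState, filter: Filter) -> Result<Vec<Log>, Web3Error> {
    if state.logs_translator_enabled {
        // Interpret `from_block` / `to_block` as virtual block numbers and let
        // `translate_get_logs` map them onto a miniblock range first.
        state.translate_get_logs(filter).await
    } else {
        // Regular path: the numbers are treated as miniblock numbers already.
        regular_get_logs(state, filter).await
    }
}

// Hypothetical fallback, named here only to make the branch explicit.
async fn regular_get_logs(_state: &RpcState, _filter: Filter) -> Result<Vec<Log>, Web3Error> {
    unimplemented!("existing miniblock-based `eth_getLogs` path")
}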
diff --git a/core/bin/zksync_core/src/bin/block_reverter.rs b/core/bin/zksync_core/src/bin/block_reverter.rs
index fcfa5ec7e1ed..48ff9def1d0c 100644
--- a/core/bin/zksync_core/src/bin/block_reverter.rs
+++ b/core/bin/zksync_core/src/bin/block_reverter.rs
@@ -1,3 +1,4 @@
+use anyhow::Context as _;
 use clap::{Parser, Subcommand};
 use tokio::io::{self, AsyncReadExt};

@@ -56,6 +57,9 @@ enum Command {
         /// Flag that specifies if RocksDB with state keeper cache should be rolled back.
         #[arg(long)]
         rollback_sk_cache: bool,
+        /// Flag that allows reverting already executed blocks; extremely dangerous and
+        /// required only for fixing external nodes.
+        #[arg(long)]
+        allow_executed_block_reversion: bool,
     },

     /// Clears failed L1 transactions.
@@ -65,8 +69,22 @@
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
-    vlog::init();
-    let _sentry_guard = vlog::init_sentry();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let log_format = vlog::log_format_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let sentry_url = vlog::sentry_url_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let environment = vlog::environment_from_env();
+
+    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    if let Some(sentry_url) = sentry_url {
+        builder = builder
+            .with_sentry_url(&sentry_url)
+            .expect("Invalid Sentry URL")
+            .with_sentry_environment(environment);
+    }
+    let _guard = builder.build();
+
     let eth_sender = ETHSenderConfig::from_env();
     let db_config = DBConfig::from_env();
     let eth_client = ETHClientConfig::from_env();
@@ -75,8 +93,11 @@ async fn main() -> anyhow::Result<()> {
     let contracts = ContractsConfig::from_env();
     let config = BlockReverterEthConfig::new(eth_sender, contracts, eth_client.web3_url.clone());

-    let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await;
-    let block_reverter = BlockReverter::new(
+    let connection_pool = ConnectionPool::builder(DbVariant::Master)
+        .build()
+        .await
+        .context("failed to build a connection pool")?;
+    let mut block_reverter = BlockReverter::new(
         db_config.state_keeper_db_path,
         db_config.merkle_tree.path,
         Some(config),
@@ -113,6 +134,7 @@ async fn main() -> anyhow::Result<()> {
             rollback_postgres,
             rollback_tree,
             rollback_sk_cache,
+            allow_executed_block_reversion,
         } => {
             if !rollback_tree && rollback_postgres {
                 println!("You want to rollback Postgres DB without rolling back tree.");
@@ -129,6 +151,21 @@ async fn main() -> anyhow::Result<()> {
                 }
             }

+            if allow_executed_block_reversion {
+                println!("You want to revert already executed blocks. It is impossible to restore them for the main node.");
+                println!("Make sure you are doing this ONLY for an external node.");
+                println!("Are you sure?
Print y/n"); + + let mut input = [0u8]; + io::stdin().read_exact(&mut input).await.unwrap(); + if input[0] != b'y' && input[0] != b'Y' { + std::process::exit(0); + } + block_reverter.change_rollback_executed_l1_batches_allowance( + L1ExecutedBatchesRevert::Allowed, + ); + } + let mut flags = BlockReverterFlags::empty(); if rollback_postgres { flags |= BlockReverterFlags::POSTGRES; diff --git a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs index 97a7a417fe5c..d8955ffd01b6 100644 --- a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs +++ b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs @@ -24,7 +24,7 @@ struct Cli { impl Cli { fn run(self, config: &DBConfig) { let db_path = &config.merkle_tree.path; - vlog::info!("Verifying consistency of Merkle tree at {db_path}"); + tracing::info!("Verifying consistency of Merkle tree at {db_path}"); let start = Instant::now(); let db = RocksDB::new(db_path, true); let tree = ZkSyncTree::new_lightweight(db); @@ -34,21 +34,35 @@ impl Cli { } else { let next_number = tree.next_l1_batch_number(); if next_number == L1BatchNumber(0) { - vlog::info!("Merkle tree is empty, skipping"); + tracing::info!("Merkle tree is empty, skipping"); return; } next_number - 1 }; - vlog::info!("L1 batch number to check: {l1_batch_number}"); + tracing::info!("L1 batch number to check: {l1_batch_number}"); tree.verify_consistency(l1_batch_number); - vlog::info!("Merkle tree verified in {:?}", start.elapsed()); + tracing::info!("Merkle tree verified in {:?}", start.elapsed()); } } fn main() { - vlog::init(); - let _sentry_guard = vlog::init_sentry(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let log_format = vlog::log_format_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let sentry_url = vlog::sentry_url_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. 
+ let environment = vlog::environment_from_env(); + + let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + if let Some(sentry_url) = sentry_url { + builder = builder + .with_sentry_url(&sentry_url) + .expect("Invalid Sentry URL") + .with_sentry_environment(environment); + } + let _guard = builder.build(); + let db_config = DBConfig::from_env(); Cli::parse().run(&db_config); } diff --git a/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs b/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs deleted file mode 100644 index 2a771f1bcbf5..000000000000 --- a/core/bin/zksync_core/src/bin/slot_index_consistency_checker.rs +++ /dev/null @@ -1,64 +0,0 @@ -use zksync_config::DBConfig; -use zksync_dal::{connection::DbVariant, ConnectionPool}; -use zksync_merkle_tree::domain::ZkSyncTree; -use zksync_storage::RocksDB; -use zksync_types::{L1BatchNumber, H256, U256}; - -pub fn u256_to_h256_rev(num: U256) -> H256 { - let mut bytes = [0u8; 32]; - num.to_little_endian(&mut bytes); - H256::from_slice(&bytes) -} - -#[tokio::main] -async fn main() { - vlog::init(); - let db_path = DBConfig::from_env().merkle_tree.path; - vlog::info!("Verifying consistency of slot indices"); - - let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; - let mut storage = pool.access_storage().await; - - let db = RocksDB::new(db_path, true); - let tree = ZkSyncTree::new_lightweight(db); - - let next_number = tree.next_l1_batch_number(); - if next_number == L1BatchNumber(0) { - vlog::info!("Merkle tree is empty, skipping"); - return; - } - let tree_l1_batch_number = next_number - 1; - let pg_l1_batch_number = storage.blocks_dal().get_sealed_l1_batch_number().await; - - let check_up_to_l1_batch_number = tree_l1_batch_number.min(pg_l1_batch_number); - - for l1_batch_number in 0..=check_up_to_l1_batch_number.0 { - vlog::info!("Checking indices for L1 batch {l1_batch_number}"); - let pg_keys: Vec<_> = storage - .storage_logs_dedup_dal() - .initial_writes_for_batch(l1_batch_number.into()) - .await - .into_iter() - .map(|(key, index)| { - ( - key, - index.expect("Missing index in database, migration should be run beforehand"), - ) - }) - .collect(); - let keys_u256: Vec<_> = pg_keys - .iter() - .map(|(key, _)| U256::from_little_endian(key.as_bytes())) - .collect(); - - let tree_keys: Vec<_> = tree - .read_leaves(l1_batch_number.into(), &keys_u256) - .into_iter() - .zip(keys_u256) - .map(|(leaf_data, key)| (u256_to_h256_rev(key), leaf_data.unwrap().leaf_index)) - .collect(); - assert_eq!(pg_keys, tree_keys); - - vlog::info!("Indices are consistent for L1 batch {l1_batch_number}"); - } -} diff --git a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs index 4c31eac7a59a..5dc1528b6a3c 100644 --- a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs +++ b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs @@ -4,7 +4,10 @@ use zksync_types::contract_verification_api::SourceCodeData; #[tokio::main] async fn main() { - let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; + let pool = ConnectionPool::singleton(DbVariant::Replica) + .build() + .await + .unwrap(); let mut storage = pool.access_storage().await; let reqs = storage .contract_verification_dal() diff --git a/core/bin/zksync_core/src/bin/zksync_server.rs b/core/bin/zksync_core/src/bin/zksync_server.rs index a160b398aebc..70ae9c425dbd 100644 --- a/core/bin/zksync_core/src/bin/zksync_server.rs +++ 
b/core/bin/zksync_core/src/bin/zksync_server.rs
@@ -1,6 +1,7 @@
+use anyhow::Context as _;
 use clap::Parser;

-use std::{env, str::FromStr, time::Duration};
+use std::{str::FromStr, time::Duration};

 use zksync_config::configs::chain::NetworkConfig;
 use zksync_config::{ContractsConfig, ETHSenderConfig};
@@ -16,7 +17,7 @@ use zksync_utils::wait_for_tasks::wait_for_tasks;
 static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

 #[derive(Debug, Parser)]
-#[structopt(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)]
+#[command(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)]
 struct Cli {
     /// Generate genesis block for the first contract deployment using temporary DB.
     #[arg(long)]
@@ -51,30 +52,42 @@ impl FromStr for ComponentsToRun {
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
     let opt = Cli::parse();
-    vlog::init();
-    let sentry_guard = vlog::init_sentry();
+
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let log_format = vlog::log_format_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let sentry_url = vlog::sentry_url_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let environment = vlog::environment_from_env();
+
+    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    if let Some(sentry_url) = &sentry_url {
+        builder = builder
+            .with_sentry_url(sentry_url)
+            .expect("Invalid Sentry URL")
+            .with_sentry_environment(environment);
+    }
+    let _guard = builder.build();
+
+    // Report whether sentry is running after the logging subsystem was initialized.
+    if let Some(sentry_url) = sentry_url {
+        tracing::info!("Sentry configured with URL: {sentry_url}");
+    } else {
+        tracing::info!("No sentry URL was provided");
+    }

     if opt.genesis || is_genesis_needed().await {
         let network = NetworkConfig::from_env();
         let eth_sender = ETHSenderConfig::from_env();
         let contracts = ContractsConfig::from_env();
-        genesis_init(&eth_sender, &network, &contracts).await;
+        genesis_init(&eth_sender, &network, &contracts)
+            .await
+            .context("genesis_init")?;
         if opt.genesis {
             return Ok(());
         }
     }

-    if sentry_guard.is_some() {
-        vlog::info!(
-            "Starting Sentry url: {}, l1_network: {}, l2_network {}",
-            env::var("MISC_SENTRY_URL").unwrap(),
-            env::var("CHAIN_ETH_NETWORK").unwrap(),
-            env::var("CHAIN_ETH_ZKSYNC_NETWORK").unwrap(),
-        );
-    } else {
-        vlog::info!("No sentry url configured");
-    }
-
     let components = if opt.rebuild_tree {
         vec![Component::Tree]
     } else {
@@ -92,9 +105,9 @@ async fn main() -> anyhow::Result<()> {
     let (core_task_handles, stop_sender, cb_receiver, health_check_handle) =
         initialize_components(components, is_only_oneshot_witness_generator_task)
             .await
-            .expect("Unable to start Core actors");
+            .context("Unable to start Core actors")?;

-    vlog::info!("Running {} core task handlers", core_task_handles.len());
+    tracing::info!("Running {} core task handlers", core_task_handles.len());
     let sigint_receiver = setup_sigint_handler();

     let particular_crypto_alerts = None::<Vec<String>>;
@@ -103,11 +116,13 @@ async fn main() -> anyhow::Result<()> {
     tokio::select!
{ _ = wait_for_tasks(core_task_handles, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = sigint_receiver => { - vlog::info!("Stop signal received, shutting down"); + tracing::info!("Stop signal received, shutting down"); }, error = cb_receiver => { if let Ok(error_msg) = error { - vlog::warn!("Circuit breaker received, shutting down. Reason: {}", error_msg); + let err = format!("Circuit breaker received, shutting down. Reason: {}", error_msg); + tracing::warn!("{err}"); + vlog::capture_message(&err, vlog::AlertLevel::Warning); } }, } @@ -119,6 +134,6 @@ async fn main() -> anyhow::Result<()> { // Sleep for some time to let some components gracefully stop. tokio::time::sleep(Duration::from_secs(5)).await; health_check_handle.stop().await; - vlog::info!("Stopped"); + tracing::info!("Stopped"); Ok(()) } diff --git a/core/bin/zksync_core/src/block_reverter/mod.rs b/core/bin/zksync_core/src/block_reverter/mod.rs index 283254bafac1..31c47d3a31c4 100644 --- a/core/bin/zksync_core/src/block_reverter/mod.rs +++ b/core/bin/zksync_core/src/block_reverter/mod.rs @@ -164,14 +164,14 @@ impl BlockReverter { // Rolling back Merkle tree if Path::new(&self.merkle_tree_path).exists() { - vlog::info!("Rolling back Merkle tree..."); + tracing::info!("Rolling back Merkle tree..."); Self::rollback_new_tree( last_l1_batch_to_keep, &self.merkle_tree_path, storage_root_hash, ); } else { - vlog::info!("Merkle tree not found; skipping"); + tracing::info!("Merkle tree not found; skipping"); } } @@ -194,34 +194,34 @@ impl BlockReverter { let mut tree = ZkSyncTree::new_lightweight(db); if tree.next_l1_batch_number() <= last_l1_batch_to_keep { - vlog::info!("Tree is behind the L1 batch to revert to; skipping"); + tracing::info!("Tree is behind the L1 batch to revert to; skipping"); return; } tree.revert_logs(last_l1_batch_to_keep); - vlog::info!("checking match of the tree root hash and root hash from Postgres..."); + tracing::info!("checking match of the tree root hash and root hash from Postgres..."); assert_eq!(tree.root_hash(), storage_root_hash); - vlog::info!("saving tree changes to disk..."); + tracing::info!("saving tree changes to disk..."); tree.save(); } /// Reverts blocks in the state keeper cache. async fn rollback_state_keeper_cache(&self, last_l1_batch_to_keep: L1BatchNumber) { - vlog::info!("opening DB with state keeper cache..."); + tracing::info!("opening DB with state keeper cache..."); let mut sk_cache = RocksdbStorage::new(self.state_keeper_cache_path.as_ref()); if sk_cache.l1_batch_number() > last_l1_batch_to_keep + 1 { let mut storage = self.connection_pool.access_storage().await; - vlog::info!("rolling back state keeper cache..."); + tracing::info!("rolling back state keeper cache..."); sk_cache.rollback(&mut storage, last_l1_batch_to_keep).await; } else { - vlog::info!("nothing to revert in state keeper cache"); + tracing::info!("nothing to revert in state keeper cache"); } } /// Reverts data in the Postgres database. 
async fn rollback_postgres(&self, last_l1_batch_to_keep: L1BatchNumber) { - vlog::info!("rolling back postgres data..."); + tracing::info!("rolling back postgres data..."); let mut storage = self.connection_pool.access_storage().await; let mut transaction = storage.start_transaction().await; @@ -231,47 +231,47 @@ impl BlockReverter { .await .expect("L1 batch should contain at least one miniblock"); - vlog::info!("rolling back transactions state..."); + tracing::info!("rolling back transactions state..."); transaction .transactions_dal() .reset_transactions_state(last_miniblock_to_keep) .await; - vlog::info!("rolling back events..."); + tracing::info!("rolling back events..."); transaction .events_dal() .rollback_events(last_miniblock_to_keep) .await; - vlog::info!("rolling back l2 to l1 logs..."); + tracing::info!("rolling back l2 to l1 logs..."); transaction .events_dal() .rollback_l2_to_l1_logs(last_miniblock_to_keep) .await; - vlog::info!("rolling back created tokens..."); + tracing::info!("rolling back created tokens..."); transaction .tokens_dal() .rollback_tokens(last_miniblock_to_keep) .await; - vlog::info!("rolling back factory deps...."); + tracing::info!("rolling back factory deps...."); transaction .storage_dal() .rollback_factory_deps(last_miniblock_to_keep) .await; - vlog::info!("rolling back storage..."); + tracing::info!("rolling back storage..."); transaction .storage_logs_dal() .rollback_storage(last_miniblock_to_keep) .await; - vlog::info!("rolling back storage logs..."); + tracing::info!("rolling back storage logs..."); transaction .storage_logs_dal() .rollback_storage_logs(last_miniblock_to_keep) .await; - vlog::info!("rolling back l1 batches..."); + tracing::info!("rolling back l1 batches..."); transaction .blocks_dal() .delete_l1_batches(last_l1_batch_to_keep) .await; - vlog::info!("rolling back miniblocks..."); + tracing::info!("rolling back miniblocks..."); transaction .blocks_dal() .delete_miniblocks(last_miniblock_to_keep) @@ -333,10 +333,10 @@ impl BlockReverter { loop { if let Some(receipt) = web3.eth().transaction_receipt(hash).await.unwrap() { assert_eq!(receipt.status, Some(1.into()), "revert transaction failed"); - vlog::info!("revert transaction has completed"); + tracing::info!("revert transaction has completed"); return; } else { - vlog::info!("waiting for L1 transaction confirmation..."); + tracing::info!("waiting for L1 transaction confirmation..."); sleep(Duration::from_secs(5)).await; } } @@ -379,7 +379,7 @@ impl BlockReverter { let last_executed_l1_batch_number = self .get_l1_batch_number_from_contract(AggregatedActionType::Execute) .await; - vlog::info!( + tracing::info!( "Last L1 batch numbers on contract: committed {last_committed_l1_batch_number}, \ verified {last_verified_l1_batch_number}, executed {last_executed_l1_batch_number}" ); @@ -408,7 +408,7 @@ impl BlockReverter { /// Clears failed L1 transactions pub async fn clear_failed_l1_transactions(&self) { - vlog::info!("clearing failed L1 transactions..."); + tracing::info!("clearing failed L1 transactions..."); self.connection_pool .access_storage() .await @@ -416,6 +416,13 @@ impl BlockReverter { .clear_failed_transactions() .await; } + + pub fn change_rollback_executed_l1_batches_allowance( + &mut self, + revert_executed_batches: L1ExecutedBatchesRevert, + ) { + self.executed_batches_revert_mode = revert_executed_batches + } } #[derive(Debug, Serialize)] diff --git a/core/bin/zksync_core/src/consistency_checker/mod.rs b/core/bin/zksync_core/src/consistency_checker/mod.rs index 
2949596ba362..4283eb26761d 100644 --- a/core/bin/zksync_core/src/consistency_checker/mod.rs +++ b/core/bin/zksync_core/src/consistency_checker/mod.rs @@ -63,7 +63,7 @@ impl ConsistencyChecker { ) }); - vlog::info!( + tracing::info!( "Checking commit tx {} for batch {}", commit_tx_hash, batch_number.0 @@ -123,7 +123,10 @@ impl ConsistencyChecker { .unwrap_or(L1BatchNumber(0)) } - pub async fn run(self, stop_receiver: tokio::sync::watch::Receiver) { + pub async fn run( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { let mut batch_number: L1BatchNumber = self .last_committed_batch() .await @@ -132,11 +135,11 @@ impl ConsistencyChecker { .max(1) .into(); - vlog::info!("Starting consistency checker from batch {}", batch_number.0); + tracing::info!("Starting consistency checker from batch {}", batch_number.0); loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, consistency_checker is shutting down"); + tracing::info!("Stop signal received, consistency_checker is shutting down"); break; } @@ -159,7 +162,7 @@ impl ConsistencyChecker { match self.check_commitments(batch_number).await { Ok(true) => { - vlog::info!("Batch {} is consistent with L1", batch_number.0); + tracing::info!("Batch {} is consistent with L1", batch_number.0); metrics::gauge!( "external_node.last_correct_batch", batch_number.0 as f64, @@ -168,13 +171,14 @@ impl ConsistencyChecker { batch_number.0 += 1; } Ok(false) => { - panic!("Batch {} is inconsistent with L1", batch_number.0); + anyhow::bail!("Batch {} is inconsistent with L1", batch_number.0); } Err(e) => { - vlog::warn!("Consistency checker error: {}", e); + tracing::warn!("Consistency checker error: {}", e); tokio::time::sleep(SLEEP_DELAY).await; } } } + Ok(()) } } diff --git a/core/bin/zksync_core/src/data_fetchers/error.rs b/core/bin/zksync_core/src/data_fetchers/error.rs index 13febd297d3f..bdee49210082 100644 --- a/core/bin/zksync_core/src/data_fetchers/error.rs +++ b/core/bin/zksync_core/src/data_fetchers/error.rs @@ -42,7 +42,7 @@ impl ErrorAnalyzer { pub async fn update(&mut self) { if self.error_counter >= self.min_errors_to_report { - vlog::error!( + tracing::error!( "[{}] A lot of requests to the remote API failed in a row. Current error count: {}", &self.fetcher, self.error_counter @@ -59,7 +59,7 @@ impl ErrorAnalyzer { self.error_counter += 1; match error { ApiFetchError::RateLimit(time) => { - vlog::warn!( + tracing::warn!( "[{}] Remote API notified us about rate limiting. 
Going to wait {} seconds before next loop iteration",
                    fetcher,
                    time.as_secs()
@@ -67,16 +67,16 @@ impl ErrorAnalyzer {
                 self.requested_delay = Some(time);
             }
             ApiFetchError::UnexpectedJsonFormat(err) => {
-                vlog::warn!("[{}] Parse data error: {}", fetcher, err);
+                tracing::warn!("[{}] Parse data error: {}", fetcher, err);
             }
             ApiFetchError::ApiUnavailable(err) => {
-                vlog::warn!("[{}] Remote API is unavailable: {}", fetcher, err);
+                tracing::warn!("[{}] Remote API is unavailable: {}", fetcher, err);
             }
             ApiFetchError::RequestTimeout => {
-                vlog::warn!("[{}] Request for data timed out", fetcher);
+                tracing::warn!("[{}] Request for data timed out", fetcher);
             }
             ApiFetchError::Other(err) => {
-                vlog::warn!("[{}] Unspecified API error: {}", fetcher, err);
+                tracing::warn!("[{}] Unspecified API error: {}", fetcher, err);
             }
         }
     }
diff --git a/core/bin/zksync_core/src/data_fetchers/mod.rs b/core/bin/zksync_core/src/data_fetchers/mod.rs
index 6ab0453eb09d..ce62a657d970 100644
--- a/core/bin/zksync_core/src/data_fetchers/mod.rs
+++ b/core/bin/zksync_core/src/data_fetchers/mod.rs
@@ -24,7 +24,7 @@ pub fn run_data_fetchers(
     network: zksync_types::network::Network,
     pool: ConnectionPool,
     stop_receiver: watch::Receiver<bool>,
-) -> Vec<JoinHandle<()>> {
+) -> Vec<JoinHandle<anyhow::Result<()>>> {
     let list_fetcher = token_list::TokenListFetcher::new(config.clone(), network);
     let price_fetcher = token_price::TokenPriceFetcher::new(config.clone());
     let volume_fetcher = token_trading_volume::TradingVolumeFetcher::new(config.clone());
diff --git a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs
index e96d01b97931..cb7a631e5d2f 100644
--- a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs
+++ b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs
@@ -62,13 +62,17 @@ impl TokenListFetcher {
         }
     }

-    pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) {
+    pub async fn run(
+        mut self,
+        pool: ConnectionPool,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
         let mut fetching_interval =
             tokio::time::interval(self.config.token_list.fetching_interval());

         loop {
             if *stop_receiver.borrow() {
-                vlog::info!("Stop signal received, token_list_fetcher is shutting down");
+                tracing::info!("Stop signal received, token_list_fetcher is shutting down");
                 break;
             }

@@ -93,6 +97,7 @@ impl TokenListFetcher {
             self.update_tokens(&mut storage, token_list).await;
         }
+        Ok(())
     }

     async fn fetch_token_list(&self) -> Result<HashMap<Address, TokenMetadata>, ApiFetchError> {
diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs
index 0f2868090f34..e25ce2f11b2e 100644
--- a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs
+++ b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs
@@ -62,13 +62,17 @@ impl TokenPriceFetcher {
         }
     }

-    pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) {
+    pub async fn run(
+        mut self,
+        pool: ConnectionPool,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
         let mut fetching_interval =
             tokio::time::interval(self.config.token_price.fetching_interval());
         loop {
             if *stop_receiver.borrow() {
-                vlog::info!("Stop signal received, token_price_fetcher is shutting down");
+                tracing::info!("Stop signal received, token_price_fetcher is shutting down");
                 break;
             }

@@ -92,6 +96,7 @@ impl TokenPriceFetcher {
             };
             self.store_token_prices(&mut storage, token_prices).await;
         }
+        Ok(())
     }

     async fn fetch_token_price(
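All three data fetchers now converge on the same loop shape: tick on an interval, exit when the stop watch channel flips, and surface errors via `anyhow::Result` instead of panicking inside the task. Reduced to a standalone sketch (the commented `do_work` stands in for the fetch-and-persist body):

use tokio::sync::watch;
use tokio::time::{interval, Duration};

async fn run_loop(stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
    let mut timer = interval(Duration::from_secs(10));
    loop {
        if *stop_receiver.borrow() {
            tracing::info!("Stop signal received, shutting down");
            break;
        }
        timer.tick().await;
        // do_work().await?; // fetch + persist; `?` surfaces the error to the supervisor
    }
    Ok(())
}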
a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs
index 396128afbc6c..1713e8b30dbf 100644
--- a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs
+++ b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs
@@ -55,12 +55,16 @@ impl TradingVolumeFetcher {
         }
     }

-    pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) {
+    pub async fn run(
+        mut self,
+        pool: ConnectionPool,
+        stop_receiver: watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
         let mut fetching_interval =
             tokio::time::interval(self.config.token_trading_volume.fetching_interval());
         loop {
             if *stop_receiver.borrow() {
-                vlog::info!("Stop signal received, trading_volume_fetcher is shutting down");
+                tracing::info!("Stop signal received, trading_volume_fetcher is shutting down");
                 break;
             }

@@ -84,6 +88,7 @@ impl TradingVolumeFetcher {
             self.store_market_volumes(&mut storage, trading_volumes)
                 .await;
         }
+        Ok(())
     }

     async fn fetch_trading_volumes(
diff --git a/core/bin/zksync_core/src/eth_sender/aggregator.rs b/core/bin/zksync_core/src/eth_sender/aggregator.rs
index c19a8885f680..6a99a5f596b3 100644
--- a/core/bin/zksync_core/src/eth_sender/aggregator.rs
+++ b/core/bin/zksync_core/src/eth_sender/aggregator.rs
@@ -1,6 +1,8 @@
-use zksync_config::configs::eth_sender::{ProofSendingMode, SenderConfig};
+use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig};
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_dal::StorageProcessor;
+use zksync_object_store::ObjectStore;
+use zksync_prover_utils::gcs_proof_fetcher::load_wrapped_fri_proofs_for_range;
 use zksync_types::{
     aggregated_operations::{
         AggregatedActionType, AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation,
@@ -23,10 +25,11 @@ pub struct Aggregator {
     proof_criteria: Vec<Box<dyn L1BatchPublishCriterion>>,
     execute_criteria: Vec<Box<dyn L1BatchPublishCriterion>>,
     config: SenderConfig,
+    blob_store: Box<dyn ObjectStore>,
 }

 impl Aggregator {
-    pub fn new(config: SenderConfig) -> Self {
+    pub fn new(config: SenderConfig, blob_store: Box<dyn ObjectStore>) -> Self {
         Self {
             commit_criteria: vec![
                 Box::from(NumberCriterion {
@@ -81,6 +84,7 @@
                 }),
             ],
             config,
+            blob_store,
         }
     }

@@ -199,12 +203,15 @@ impl Aggregator {
         storage: &mut StorageProcessor<'_>,
         prover_storage: &mut StorageProcessor<'_>,
         l1_verifier_config: L1VerifierConfig,
+        proof_loading_mode: &ProofLoadingMode,
+        blob_store: &dyn ObjectStore,
     ) -> Option<L1BatchProofOperation> {
         let previous_proven_batch_number =
             storage.blocks_dal().get_last_l1_batch_with_prove_tx().await;
+        let batch_to_prove = previous_proven_batch_number + 1;
         if let Some(version_id) = storage
             .blocks_dal()
-            .get_batch_protocol_version_id(previous_proven_batch_number + 1)
+            .get_batch_protocol_version_id(batch_to_prove)
             .await
         {
             let verifier_config_for_next_batch = storage
@@ -216,13 +223,17 @@ impl Aggregator {
                 return None;
             }
         }
-        let proofs = prover_storage
-            .prover_dal()
-            .get_final_proofs_for_blocks(
-                previous_proven_batch_number + 1,
-                previous_proven_batch_number + 1,
-            )
-            .await;
+        let proofs = match proof_loading_mode {
+            ProofLoadingMode::OldProofFromDb => {
+                prover_storage
+                    .prover_dal()
+                    .get_final_proofs_for_blocks(batch_to_prove, batch_to_prove)
+                    .await
+            }
+            ProofLoadingMode::FriProofFromGcs => {
+                load_wrapped_fri_proofs_for_range(batch_to_prove, batch_to_prove, blob_store).await
+            }
+        };
         if proofs.is_empty() {
             // The proof for the next L1 batch is not generated yet
             return None;
         }
@@ -297,7 +308,14 @@
     ) -> Option<L1BatchProofOperation> {
         match
self.config.proof_sending_mode { ProofSendingMode::OnlyRealProofs => { - Self::load_real_proof_operation(storage, prover_storage, l1_verifier_config).await + Self::load_real_proof_operation( + storage, + prover_storage, + l1_verifier_config, + &self.config.proof_loading_mode, + &*self.blob_store, + ) + .await } ProofSendingMode::SkipEveryProof => { @@ -315,9 +333,14 @@ impl Aggregator { ProofSendingMode::OnlySampledProofs => { // if there is a sampled proof then send it, otherwise check for skipped ones. - if let Some(op) = - Self::load_real_proof_operation(storage, prover_storage, l1_verifier_config) - .await + if let Some(op) = Self::load_real_proof_operation( + storage, + prover_storage, + l1_verifier_config, + &self.config.proof_loading_mode, + &*self.blob_store, + ) + .await { Some(op) } else { diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 13d565d29773..5df03c15f766 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -73,13 +73,13 @@ impl EthTxAggregator { prover_pool: ConnectionPool, eth_client: E, stop_receiver: watch::Receiver, - ) { + ) -> anyhow::Result<()> { loop { let mut storage = pool.access_storage_tagged("eth_sender").await; let mut prover_storage = prover_pool.access_storage_tagged("eth_sender").await; if *stop_receiver.borrow() { - vlog::info!("Stop signal received, eth_tx_aggregator is shutting down"); + tracing::info!("Stop signal received, eth_tx_aggregator is shutting down"); break; } @@ -89,11 +89,12 @@ impl EthTxAggregator { { // Web3 API request failures can cause this, // and anything more important is already properly reported. - vlog::warn!("eth_sender error {err:?}"); + tracing::warn!("eth_sender error {err:?}"); } tokio::time::sleep(self.config.aggregate_tx_poll_period()).await; } + Ok(()) } pub(super) async fn get_multicall_data( @@ -359,7 +360,7 @@ impl EthTxAggregator { tx: &EthTx, ) { let l1_batch_number_range = aggregated_op.l1_batch_range(); - vlog::info!( + tracing::info!( "eth_tx with ID {} for op {} was saved for L1 batches {l1_batch_number_range:?}", tx.id, aggregated_op.get_action_caption() diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs index de3703e8b37f..1469e17612f1 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -1,3 +1,4 @@ +use anyhow::Context as _; use std::sync::Arc; use tokio::sync::watch; @@ -89,7 +90,7 @@ where match self.get_tx_status(history_item.tx_hash).await { Ok(Some(s)) => return Some(s), Ok(_) => continue, - Err(err) => vlog::warn!( + Err(err) => tracing::warn!( "Can't check transaction {:?}: {:?}", history_item.tx_hash, err @@ -112,7 +113,7 @@ where let priority_fee_per_gas = self .increase_priority_fee(storage, tx.id, base_fee_per_gas) .await?; - vlog::info!( + tracing::info!( "Resending operation {} with base fee {:?} and priority fee {:?}", tx.id, base_fee_per_gas, @@ -157,7 +158,7 @@ where if base_fee_per_gas <= next_block_minimal_base_fee.min(previous_base_fee) { // If the base fee is lower than the previous used one // or is lower than the minimal possible value for the next block, sending is skipped. 
- vlog::info!( + tracing::info!( "Skipping gas adjustment for operation {}, \ base_fee_per_gas: suggested for resending {:?}, previously sent {:?}, next block minimum {:?}", eth_tx_id, @@ -214,7 +215,7 @@ where .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx, current_block) .await { - vlog::warn!( + tracing::warn!( "Error when sending new signed tx for tx {}, base_fee_per_gas {}, priority_fee_per_gas: {}: {}", tx.id, base_fee_per_gas, @@ -319,7 +320,7 @@ where inflight_txs.len() as f64, ); - vlog::trace!( + tracing::trace!( "Going through not confirmed txs. \ Block numbers: latest {}, finalized {}, \ operator's nonce: latest {}, finalized {}", @@ -331,7 +332,7 @@ where // Not confirmed transactions, ordered by nonce for tx in inflight_txs { - vlog::trace!("Checking tx id: {}", tx.id,); + tracing::trace!("Checking tx id: {}", tx.id,); // If the `operator_nonce.latest` <= `tx.nonce`, this means // that `tx` is not mined and we should resend it. @@ -354,7 +355,7 @@ where continue; } - vlog::trace!( + tracing::trace!( "Sender's nonce on finalized block is greater than current tx's nonce. \ Checking transaction with id {}. Tx nonce is equal to {}", tx.id, @@ -371,7 +372,7 @@ where // This is an error because such a big reorg may cause transactions that were // previously recorded as confirmed to become pending again and we have to // make sure it's not the case - otherwise eth_sender may not work properly. - vlog::error!( + tracing::error!( "Possible block reorgs: finalized nonce increase detected, but no tx receipt found for tx {:?}", &tx ); @@ -392,6 +393,7 @@ where tx.raw_tx.clone(), tx.contract_address, Options::with(|opt| { + // TODO Calculate gas for every operation SMA-1436 opt.gas = Some(self.config.max_aggregated_tx_gas.into()); opt.max_fee_per_gas = Some(U256::from(base_fee_per_gas + priority_fee_per_gas)); opt.max_priority_fee_per_gas = Some(U256::from(priority_fee_per_gas)); @@ -415,7 +417,7 @@ where let tx_status = self.get_tx_status(tx.tx_hash).await; if let Ok(Some(tx_status)) = tx_status { - vlog::info!("The tx {:?} has been already sent", tx.tx_hash); + tracing::info!("The tx {:?} has been already sent", tx.tx_hash); storage .eth_sender_dal() .set_sent_at_block(tx.id, tx_status.receipt.block_number.unwrap().as_u32()) @@ -438,7 +440,7 @@ where ) .await { - vlog::warn!("Error {:?} in sending tx {:?}", error, &tx); + tracing::warn!("Error {:?} in sending tx {:?}", error, &tx); } } } @@ -458,7 +460,7 @@ where self.fail_tx(storage, tx, tx_status).await; } } else { - vlog::debug!( + tracing::debug!( "Transaction {} with id {} is not yet finalized: block in receipt {receipt_block_number}, finalized block {finalized_block}", tx_status.tx_hash, tx.id, @@ -484,7 +486,7 @@ where "Tx is already failed, it's safe to fail here and apply the status on the next run", ); - vlog::error!( + tracing::error!( "Eth tx failed {:?}, {:?}, failure reason {:?}", tx, tx_status.receipt, @@ -513,7 +515,7 @@ where track_eth_tx_metrics(storage, "mined", tx).await; if gas_used > U256::from(tx.predicted_gas_cost) { - vlog::error!( + tracing::error!( "Predicted gas {} lower than used gas {} for tx {:?} {}", tx.predicted_gas_cost, gas_used, @@ -521,7 +523,7 @@ where tx.id ); } - vlog::info!( + tracing::info!( "eth_tx {} with hash {:?} for {} is confirmed. 
Gas spent: {:?}", tx.id, tx_hash, @@ -551,9 +553,16 @@ where ); } - pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver) { + pub async fn run( + mut self, + pool: ConnectionPool, + stop_receiver: watch::Receiver, + ) -> anyhow::Result<()> { { - let l1_block_numbers = self.get_l1_block_numbers().await.unwrap(); + let l1_block_numbers = self + .get_l1_block_numbers() + .await + .context("get_l1_block_numbers()")?; let mut storage = pool.access_storage_tagged("eth_sender").await; self.send_unsent_txs(&mut storage, l1_block_numbers).await; } @@ -565,7 +574,7 @@ where let mut storage = pool.access_storage_tagged("eth_sender").await; if *stop_receiver.borrow() { - vlog::info!("Stop signal received, eth_tx_manager is shutting down"); + tracing::info!("Stop signal received, eth_tx_manager is shutting down"); break; } @@ -574,12 +583,13 @@ where Err(e) => { // Web3 API request failures can cause this, // and anything more important is already properly reported. - vlog::warn!("eth_sender error {:?}", e); + tracing::warn!("eth_sender error {:?}", e); } } tokio::time::sleep(self.config.tx_poll_period()).await; } + Ok(()) } async fn send_new_eth_txs( diff --git a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs index 2f9e1c706c48..d96b94a56e88 100644 --- a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs +++ b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs @@ -19,7 +19,7 @@ pub async fn track_eth_tx_metrics( // This should be only the case when some blocks were reverted. if l1_batch_headers.is_empty() { - vlog::warn!("No L1 batches were found for eth_tx with id = {}", tx.id); + tracing::warn!("No L1 batches were found for eth_tx with id = {}", tx.id); return; } diff --git a/core/bin/zksync_core/src/eth_sender/publish_criterion.rs b/core/bin/zksync_core/src/eth_sender/publish_criterion.rs index 1046496c4295..1870ecd05bda 100644 --- a/core/bin/zksync_core/src/eth_sender/publish_criterion.rs +++ b/core/bin/zksync_core/src/eth_sender/publish_criterion.rs @@ -52,7 +52,7 @@ impl L1BatchPublishCriterion for NumberCriterion { let batch_count = last_batch_number - first + 1; if batch_count >= self.limit { let result = L1BatchNumber(first + self.limit - 1); - vlog::debug!( + tracing::debug!( "`l1_batch_number` publish criterion (limit={}) triggered for op {} with L1 batch range {:?}", self.limit, self.op, @@ -109,7 +109,7 @@ impl L1BatchPublishCriterion for TimestampDeadlineCriterion { .unwrap_or(first_l1_batch) .header .number; - vlog::debug!( + tracing::debug!( "`timestamp` publish criterion triggered for op {} with L1 batch range {:?}", self.op, first_l1_batch.header.number.0..=result.0 @@ -189,7 +189,7 @@ impl L1BatchPublishCriterion for GasCriterion { if let Some(last_l1_batch) = last_l1_batch { let first_l1_batch_number = consecutive_l1_batches.first().unwrap().header.number.0; - vlog::debug!( + tracing::debug!( "`gas_limit` publish criterion (gas={}) triggered for op {} with L1 batch range {:?}", self.gas_limit - gas_left, self.op, @@ -240,7 +240,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion { let first_l1_batch_number = consecutive_l1_batches.first().unwrap().header.number.0; let output = l1_batch.header.number - 1; - vlog::debug!( + tracing::debug!( "`data_size` publish criterion (data={}) triggered for op {} with L1 batch range {:?}", self.data_limit - data_size_left, self.op, diff --git a/core/bin/zksync_core/src/eth_sender/tests.rs b/core/bin/zksync_core/src/eth_sender/tests.rs index 
ee9f739b2125..ff60034d8ad4 100644 --- a/core/bin/zksync_core/src/eth_sender/tests.rs +++ b/core/bin/zksync_core/src/eth_sender/tests.rs @@ -9,6 +9,7 @@ use zksync_config::{ use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::mock::MockEthereum, EthInterface}; +use zksync_object_store::ObjectStoreFactory; use zksync_types::{ aggregated_operations::{ AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, L1BatchProofOperation, @@ -85,6 +86,7 @@ impl EthSenderTester { .await .unwrap(), ); + let store_factory = ObjectStoreFactory::from_env(); let aggregator = EthTxAggregator::new( SenderConfig { @@ -92,7 +94,10 @@ impl EthSenderTester { ..eth_sender_config.sender.clone() }, // Aggregator - unused - Aggregator::new(aggregator_config.clone()), + Aggregator::new( + aggregator_config.clone(), + store_factory.create_store().await, + ), // zkSync contract address Address::random(), contracts_config.l1_multicall3_addr, @@ -794,7 +799,7 @@ async fn insert_genesis_protocol_version(tester: &EthSenderTester) { .storage() .await .protocol_versions_dal() - .save_protocol_version(Default::default()) + .save_protocol_version_with_tx(Default::default()) .await; } diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs index e3b4c3a3ecc5..e14ffc09a623 100644 --- a/core/bin/zksync_core/src/eth_watch/client.rs +++ b/core/bin/zksync_core/src/eth_watch/client.rs @@ -61,7 +61,7 @@ impl EthHttpQueryClient { zksync_contract_addr: Address, confirmations_for_eth_event: Option, ) -> Self { - vlog::debug!("New eth client, contract addr: {:x}", zksync_contract_addr); + tracing::debug!("New eth client, contract addr: {:x}", zksync_contract_addr); Self { client, topics: Vec::new(), @@ -119,7 +119,7 @@ impl EthClient for EthHttpQueryClient EthClient for EthHttpQueryClient= mid { return Err(Error::InfiniteRecursion); } - vlog::warn!( + tracing::warn!( "Splitting block range in half: {:?} - {:?} - {:?}", from, mid, @@ -178,7 +178,7 @@ impl EthClient for EthHttpQueryClient 0 { - vlog::warn!("Retrying. Retries left: {:?}", retries_left); + tracing::warn!("Retrying. 
Retries left: {:?}", retries_left); result = self.get_events(from, to, retries_left - 1).await; } } diff --git a/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs b/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs index d10b5893091d..729f217419dd 100644 --- a/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs +++ b/core/bin/zksync_core/src/eth_watch/event_processors/priority_ops.rs @@ -50,7 +50,7 @@ impl EventProcessor for PriorityOpsEventProcessor { let first = &priority_ops[0]; let last = &priority_ops[priority_ops.len() - 1]; - vlog::debug!( + tracing::debug!( "Received priority requests with serial ids: {} (block {}) - {} (block {})", first.serial_id(), first.eth_block(), diff --git a/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs b/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs index a77daed6736d..acf66b7514fb 100644 --- a/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs +++ b/core/bin/zksync_core/src/eth_watch/event_processors/upgrades.rs @@ -59,7 +59,7 @@ impl EventProcessor for UpgradesEventProcessor { .iter() .map(|(u, _)| format!("{}", u.id as u16)) .collect(); - vlog::debug!("Received upgrades with ids: {}", ids_str.join(", ")); + tracing::debug!("Received upgrades with ids: {}", ids_str.join(", ")); let new_upgrades: Vec<_> = upgrades .into_iter() @@ -80,7 +80,7 @@ impl EventProcessor for UpgradesEventProcessor { let new_version = previous_version.apply_upgrade(upgrade, scheduler_vk_hash); storage .protocol_versions_dal() - .save_protocol_version(new_version) + .save_protocol_version_with_tx(new_version) .await; } metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist_upgrades"); diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index 2d2d7276257f..8a756baddd61 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -54,7 +54,7 @@ impl EthWatch { let state = Self::initialize_state(&client, &mut storage).await; - vlog::info!("initialized state: {:?}", state); + tracing::info!("initialized state: {:?}", state); let priority_ops_processor = PriorityOpsEventProcessor::new(state.next_expected_priority_id); @@ -114,11 +114,15 @@ impl EthWatch { } } - pub async fn run(&mut self, pool: ConnectionPool, stop_receiver: watch::Receiver) { + pub async fn run( + &mut self, + pool: ConnectionPool, + stop_receiver: watch::Receiver, + ) -> anyhow::Result<()> { let mut timer = tokio::time::interval(self.poll_interval); loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, eth_watch is shutting down"); + tracing::info!("Stop signal received, eth_watch is shutting down"); break; } @@ -130,13 +134,14 @@ impl EthWatch { if let Err(error) = self.loop_iteration(&mut storage).await { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. 
-                vlog::error!("Failed to process new blocks {}", error);
+                tracing::error!("Failed to process new blocks {}", error);
                 self.last_processed_ethereum_block =
                     Self::initialize_state(&self.client, &mut storage)
                         .await
                         .last_processed_ethereum_block;
             }
         }
+        Ok(())
     }

     #[tracing::instrument(skip(self, storage))]
@@ -174,7 +179,7 @@ pub async fn start_eth_watch(
     eth_gateway: E,
     diamond_proxy_addr: Address,
     stop_receiver: watch::Receiver<bool>,
-) -> JoinHandle<()> {
+) -> JoinHandle<anyhow::Result<()>> {
     let eth_watch = ETHWatchConfig::from_env();
     let eth_client = EthHttpQueryClient::new(
         eth_gateway,
@@ -184,7 +189,5 @@ pub async fn start_eth_watch(
     let mut eth_watch = EthWatch::new(eth_client, &pool, eth_watch.poll_interval()).await;

-    tokio::spawn(async move {
-        eth_watch.run(pool, stop_receiver).await;
-    })
+    tokio::spawn(async move { eth_watch.run(pool, stop_receiver).await })
 }
diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs
index b94e85879964..32d3837fecc2 100644
--- a/core/bin/zksync_core/src/eth_watch/tests.rs
+++ b/core/bin/zksync_core/src/eth_watch/tests.rs
@@ -627,7 +627,7 @@ async fn setup_db(connection_pool: &ConnectionPool) {
         .access_test_storage()
         .await
         .protocol_versions_dal()
-        .save_protocol_version(ProtocolVersion {
+        .save_protocol_version_with_tx(ProtocolVersion {
             id: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(),
             ..Default::default()
         })
diff --git a/core/bin/zksync_core/src/fee_ticker/mod.rs b/core/bin/zksync_core/src/fee_ticker/mod.rs
index 35facd0e4b35..ef8e6fa05470 100644
--- a/core/bin/zksync_core/src/fee_ticker/mod.rs
+++ b/core/bin/zksync_core/src/fee_ticker/mod.rs
@@ -4,7 +4,7 @@ use core::fmt::Debug;
 use bigdecimal::BigDecimal;
 use num::{rational::Ratio, BigUint};

-use vm::vm_with_bootloader::base_fee_to_gas_per_pubdata;
+use vm::utils::fee::base_fee_to_gas_per_pubdata;
 use zksync_types::Address;
 use zksync_utils::ratio_to_big_decimal_normalized;

@@ -73,7 +73,7 @@ impl FeeTicker {
             .ok_or_else(|| {
                 // It's kinda not OK that we have a price for token, but no metadata.
                 // Not a reason for a panic, but surely highest possible report level.
-                vlog::error!(
+                tracing::error!(
                     "Token {:x} has price, but no stored metadata",
                     l2_token_addr
                 );
diff --git a/core/bin/zksync_core/src/genesis.rs b/core/bin/zksync_core/src/genesis.rs
index e47a6025282c..7073b652b70b 100644
--- a/core/bin/zksync_core/src/genesis.rs
+++ b/core/bin/zksync_core/src/genesis.rs
@@ -2,30 +2,32 @@
 //! It initializes the Merkle tree with the basic setup (such as fields of special service accounts),
 //! sets up the required databases, and outputs the data required to initialize a smart contract.
-use vm::zk_evm::aux_structures::{LogQuery, Timestamp};
+use anyhow::Context as _;
+
 use zksync_contracts::BaseSystemContracts;
 use zksync_dal::StorageProcessor;
 use zksync_merkle_tree::domain::ZkSyncTree;
+
 use zksync_types::{
     block::DeployedContract,
-    block::{BlockGasCount, L1BatchHeader, MiniblockHeader},
+    block::{legacy_miniblock_hash, BlockGasCount, L1BatchHeader, MiniblockHeader},
     commitment::{L1BatchCommitment, L1BatchMetadata},
     get_code_key, get_system_context_init_logs,
     protocol_version::{L1VerifierConfig, ProtocolVersion},
     tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS},
     zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries,
-    AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId,
-    StorageKey, StorageLog, StorageLogKind, H256,
-};
-use zksync_utils::{
-    be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash, u256_to_h256,
+    AccountTreeId, Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, ProtocolVersionId,
+    StorageKey, StorageLog, StorageLogKind, Timestamp, H256,
 };
+use zksync_utils::{be_words_to_bytes, h256_to_u256};
+use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};

 use crate::metadata_calculator::L1BatchWithLogs;

 #[derive(Debug, Clone)]
 pub struct GenesisParams {
     pub first_validator: Address,
+    pub protocol_version: ProtocolVersionId,
     pub base_system_contracts: BaseSystemContracts,
     pub system_contracts: Vec<DeployedContract>,
     pub first_verifier_address: Address,
@@ -36,22 +38,23 @@ pub async fn ensure_genesis_state(
     storage: &mut StorageProcessor<'_>,
     zksync_chain_id: L2ChainId,
     genesis_params: &GenesisParams,
-) -> H256 {
+) -> anyhow::Result<H256> {
     let mut transaction = storage.start_transaction().await;

     // return if genesis block was already processed
     if !transaction.blocks_dal().is_genesis_needed().await {
-        vlog::debug!("genesis is not needed!");
+        tracing::debug!("genesis is not needed!");
         return transaction
             .blocks_dal()
             .get_l1_batch_state_root(L1BatchNumber(0))
             .await
-            .expect("genesis block hash is empty");
+            .context("genesis block hash is empty");
     }

-    vlog::info!("running regenesis");
+    tracing::info!("running regenesis");
     let GenesisParams {
         first_validator,
+        protocol_version,
         base_system_contracts,
         system_contracts,
         first_verifier_address,
@@ -64,13 +67,14 @@ pub async fn ensure_genesis_state(
         &mut transaction,
         *first_validator,
         zksync_chain_id,
+        *protocol_version,
         base_system_contracts,
         system_contracts,
         *first_l1_verifier_config,
         *first_verifier_address,
     )
     .await;
-    vlog::info!("chain_schema_genesis is complete");
+    tracing::info!("chain_schema_genesis is complete");

     let storage_logs = L1BatchWithLogs::new(&mut transaction, L1BatchNumber(0)).await;
     let storage_logs = storage_logs.unwrap().storage_logs;
@@ -95,7 +99,7 @@ pub async fn ensure_genesis_state(
         rollup_last_leaf_index,
     )
     .await;
-    vlog::info!("operations_schema_genesis is complete");
+    tracing::info!("operations_schema_genesis is complete");

     transaction.commit().await;

@@ -118,7 +122,7 @@ pub async fn ensure_genesis_state(
         base_system_contracts_hashes.default_aa
     );

-    genesis_root_hash
+    Ok(genesis_root_hash)
 }

 // Default account and bootloader are not a regular system contracts
@@ -236,17 +240,19 @@ async fn insert_system_contracts(
     transaction.commit().await;
 }

+#[allow(clippy::too_many_arguments)]
 pub(crate) async fn create_genesis_l1_batch(
     storage: &mut StorageProcessor<'_>,
     first_validator_address: Address,
     chain_id: L2ChainId,
+    protocol_version: ProtocolVersionId,
     base_system_contracts: &BaseSystemContracts,
system_contracts: &[DeployedContract], l1_verifier_config: L1VerifierConfig, verifier_address: Address, ) { let version = ProtocolVersion { - id: ProtocolVersionId::latest(), + id: protocol_version, timestamp: 0, l1_verifier_config, base_system_contracts_hashes: base_system_contracts.hashes(), @@ -266,7 +272,7 @@ pub(crate) async fn create_genesis_l1_batch( let genesis_miniblock_header = MiniblockHeader { number: MiniblockNumber(0), timestamp: 0, - hash: miniblock_hash(MiniblockNumber(0)), + hash: legacy_miniblock_hash(MiniblockNumber(0)), l1_tx_count: 0, l2_tx_count: 0, base_fee_per_gas: 0, @@ -274,13 +280,14 @@ pub(crate) async fn create_genesis_l1_batch( l2_fair_gas_price: 0, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(ProtocolVersionId::latest()), + virtual_blocks: 0, }; let mut transaction = storage.start_transaction().await; transaction .protocol_versions_dal() - .save_protocol_version(version) + .save_protocol_version_with_tx(version) .await; transaction .blocks_dal() @@ -370,13 +377,16 @@ mod tests { conn.blocks_dal().delete_genesis().await; let params = GenesisParams { + protocol_version: ProtocolVersionId::latest(), first_validator: Address::random(), base_system_contracts: BaseSystemContracts::load_from_disk(), system_contracts: get_system_smart_contracts(), first_l1_verifier_config: L1VerifierConfig::default(), first_verifier_address: Address::random(), }; - ensure_genesis_state(&mut conn, L2ChainId(270), &params).await; + ensure_genesis_state(&mut conn, L2ChainId(270), &params) + .await + .unwrap(); assert!(!conn.blocks_dal().is_genesis_needed().await); let metadata = conn @@ -387,6 +397,8 @@ mod tests { assert_ne!(root_hash, H256::zero()); // Check that `ensure_genesis_state()` doesn't panic on repeated runs.
- ensure_genesis_state(&mut conn, L2ChainId(270), &params).await; + ensure_genesis_state(&mut conn, L2ChainId(270), &params) + .await + .unwrap(); } } diff --git a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs index 43e713a51c12..94bf04dc1783 100644 --- a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -49,6 +49,26 @@ impl L1BatchMetricsReporter { block_metrics.push((l1_batch, format!("l1_mined_{}", tx_type.as_str()))) } + // todo: PLA-335 + // block_metrics.append( + // &mut conn + // .prover_dal() + // .get_proven_l1_batches() + // .into_iter() + // .map(|(l1_batch_number, stage)| (l1_batch_number, format!("prove_{:?}", stage))) + // .collect(), + // ); + + // todo: PLA-335 + // block_metrics.append( + // &mut conn + // .witness_generator_dal() + // .get_witness_generated_l1_batches() + // .into_iter() + // .map(|(l1_batch_number, stage)| (l1_batch_number, format!("wit_gen_{:?}", stage))) + // .collect(), + // ); + for (l1_batch_number, stage) in block_metrics { metrics::gauge!( "server.block_number", diff --git a/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs new file mode 100644 index 000000000000..16726f46f974 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs @@ -0,0 +1,54 @@ +use std::time::Duration; + +use async_trait::async_trait; +use zksync_dal::ConnectionPool; +use zksync_prover_utils::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub struct FriProofCompressorJobRetryManager { + pool: ConnectionPool, + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, +} + +impl FriProofCompressorJobRetryManager { + pub fn new( + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, + pool: ConnectionPool, + ) -> Self { + Self { + max_attempts, + processing_timeout, + retry_interval_ms, + pool, + } + } +} +
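Each of these house-keeper components follows the same template: a struct holding a `ConnectionPool` plus its timing parameters, an impl of the `PeriodicJob` trait from `zksync_prover_utils::periodic_job` (shown next), and a later `tokio::spawn(job.run())` registration in `lib.rs`. The trait's definition is not part of this diff; the sketch below is a hedged reconstruction inferred from the call sites, not the actual source:

```rust
// Hypothetical reconstruction of `zksync_prover_utils::periodic_job::PeriodicJob`,
// inferred from how it is used in this diff (`SERVICE_NAME`, `run_routine_task`,
// `polling_interval_ms`, `tokio::spawn(job.run())`); the real definition may differ.
use std::time::Duration;

use async_trait::async_trait;

#[async_trait]
trait PeriodicJob: Sized + Send {
    const SERVICE_NAME: &'static str;

    /// One iteration of the job's work, e.g. re-queuing stuck jobs.
    async fn run_routine_task(&mut self);

    fn polling_interval_ms(&self) -> u64;

    /// Default driver: loop forever, sleeping between iterations. Returning
    /// `anyhow::Result<()>` lets `tokio::spawn(job.run())` fit the
    /// `Vec<JoinHandle<anyhow::Result<()>>>` used for `task_futures` in this PR.
    async fn run(mut self) -> anyhow::Result<()> {
        tracing::info!("Starting periodic job: {}", Self::SERVICE_NAME);
        loop {
            self.run_routine_task().await;
            tokio::time::sleep(Duration::from_millis(self.polling_interval_ms())).await;
        }
    }
}
```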
+/// Invoked periodically to re-queue stuck fri proof compressor jobs. +#[async_trait] +impl PeriodicJob for FriProofCompressorJobRetryManager { + const SERVICE_NAME: &'static str = "FriProofCompressorJobRetryManager"; + + async fn run_routine_task(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_proof_compressor_dal() + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + tracing::info!("re-queuing fri proof compressor job {:?}", stuck_job); + } + metrics::counter!("prover_fri.proof_compressor.requeued_jobs", job_len as u64); + } + + fn polling_interval_ms(&self) -> u64 { + self.retry_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs new file mode 100644 index 000000000000..d2bc42739a95 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -0,0 +1,65 @@ +use async_trait::async_trait; +use zksync_dal::ConnectionPool; +use zksync_types::proofs::JobCountStatistics; + +use zksync_prover_utils::periodic_job::PeriodicJob; + +const PROOF_COMPRESSOR_SERVICE_NAME: &str = "proof_compressor"; + +#[derive(Debug)] +pub struct FriProofCompressorStatsReporter { + reporting_interval_ms: u64, + pool: ConnectionPool, +} + +impl FriProofCompressorStatsReporter { + pub fn new(reporting_interval_ms: u64, pool: ConnectionPool) -> Self { + Self { + reporting_interval_ms, + pool, + } + } + + async fn get_job_statistics(pool: &ConnectionPool) -> JobCountStatistics { + pool.access_storage() + .await + .fri_proof_compressor_dal() + .get_jobs_stats() + .await + } +} + +/// Invoked periodically to push job statistics to Prometheus +/// Note: these values will be used for auto-scaling proof compressor +#[async_trait] +impl PeriodicJob for FriProofCompressorStatsReporter { + const SERVICE_NAME: &'static str = "ProofCompressorStatsReporter"; + + async fn run_routine_task(&mut self) { + let stats = Self::get_job_statistics(&self.pool).await; + + if stats.queued > 0 { + tracing::info!( + "Found {} free {} in progress proof compressor jobs", + stats.queued, + stats.in_progress + ); + } + + metrics::gauge!( + format!("prover_fri.{}.jobs", PROOF_COMPRESSOR_SERVICE_NAME), + stats.queued as f64, + "type" => "queued" + ); + + metrics::gauge!( + format!("prover_fri.{}.jobs", PROOF_COMPRESSOR_SERVICE_NAME), + stats.in_progress as f64, + "type" => "in_progress" + ); + } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs index 493dc3294204..c56ffae7f4c2 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs @@ -43,7 +43,7 @@ impl PeriodicJob for FriProverJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing fri prover job {:?}", stuck_job); + tracing::info!("re-queuing fri prover job {:?}", stuck_job); } metrics::counter!("server.prover_fri.requeued_jobs", job_len as u64); } diff --git a/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs index d037c9077d6d..47fefe524594 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs +++
b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs @@ -28,7 +28,7 @@ impl SchedulerCircuitQueuer { conn.fri_witness_generator_dal() .mark_scheduler_jobs_as_queued(l1_batch_number) .await; - vlog::info!( + tracing::info!( "Marked fri scheduler aggregation job for l1_batch {} as queued", l1_batch_number, ); diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs index 4e63b8e2506d..a839c19868de 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs @@ -37,7 +37,7 @@ impl FriWitnessGeneratorJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing fri witness input job {:?}", stuck_job); + tracing::info!("re-queuing fri witness input job {:?}", stuck_job); } metrics::counter!("server.witness_inputs_fri.requeued_jobs", job_len as u64); } @@ -52,7 +52,7 @@ impl FriWitnessGeneratorJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing fri witness input job {:?}", stuck_job); + tracing::info!("re-queuing fri witness input job {:?}", stuck_job); } metrics::counter!( "server.leaf_aggregations_jobs_fri.requeued_jobs", @@ -70,7 +70,7 @@ impl FriWitnessGeneratorJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing fri witness input job {:?}", stuck_job); + tracing::info!("re-queuing fri witness input job {:?}", stuck_job); } metrics::counter!( "server.node_aggregations_jobs_fri.requeued_jobs", @@ -88,7 +88,7 @@ impl FriWitnessGeneratorJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing fri witness input job {:?}", stuck_job); + tracing::info!("re-queuing fri witness input job {:?}", stuck_job); } metrics::counter!("server.scheduler_jobs_fri.requeued_jobs", job_len as u64); } diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index 1fb145953464..36203d09c588 100644 --- a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -55,7 +55,7 @@ impl FriWitnessGeneratorStatsReporter { fn emit_metrics_for_round(round: AggregationRound, stats: JobCountStatistics) { if stats.queued > 0 || stats.in_progress > 0 { - vlog::trace!( + tracing::trace!( "Found {} free and {} in progress {:?} FRI witness generators jobs", stats.queued, stats.in_progress, @@ -93,7 +93,7 @@ impl PeriodicJob for FriWitnessGeneratorStatsReporter { } if aggregated.queued > 0 { - vlog::trace!( + tracing::trace!( "Found {} free {} in progress witness generators jobs", aggregated.queued, aggregated.in_progress diff --git a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs index a575a5a50084..809b912199bf 100644 --- a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs +++ b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs @@ -78,7 +78,7 @@ impl GcsBlobCleaner { blob_urls: Vec<(i64, S)>, ) -> Vec<i64> { if !blob_urls.is_empty() { - vlog::info!("Found {} {bucket} for cleaning blobs", blob_urls.len()); + tracing::info!("Found {} {bucket} for
cleaning blobs", blob_urls.len()); } for (_, url) in &blob_urls { diff --git a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index 7ffdb40b9577..315715221b7c 100644 --- a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -42,7 +42,7 @@ impl PeriodicJob for GpuProverQueueMonitor { for ((region, zone), num_gpu) in prover_gpu_count_per_region_zone { let synthesizers = self.synthesizer_per_gpu as u64 * num_gpu; if synthesizers > 0 { - vlog::info!( + tracing::info!( "Would be spawning {} circuit synthesizers in region {} zone {}", synthesizers, region, diff --git a/core/bin/zksync_core/src/house_keeper/mod.rs b/core/bin/zksync_core/src/house_keeper/mod.rs index ae5c8a991eb2..d28618b0f01f 100644 --- a/core/bin/zksync_core/src/house_keeper/mod.rs +++ b/core/bin/zksync_core/src/house_keeper/mod.rs @@ -1,4 +1,6 @@ pub mod blocks_state_reporter; +pub mod fri_proof_compressor_job_retry_manager; +pub mod fri_proof_compressor_queue_monitor; pub mod fri_prover_job_retry_manager; pub mod fri_prover_queue_monitor; pub mod fri_scheduler_circuit_queuer; diff --git a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs index 7e1259aeea07..6179bc8deb7b 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs @@ -44,7 +44,7 @@ impl PeriodicJob for ProverJobRetryManager { .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { - vlog::info!("re-queuing prover job {:?}", stuck_job); + tracing::info!("re-queuing prover job {:?}", stuck_job); } metrics::counter!("server.prover.requeued_jobs", job_len as u64); } diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs index 995942116bb6..794be9fcd6a4 100644 --- a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -25,7 +25,7 @@ impl WaitingToQueuedFriWitnessJobMover { .await; let len = l1_batch_numbers.len(); for (l1_batch_number, circuit_id) in l1_batch_numbers { - vlog::info!( + tracing::info!( "Marked fri leaf aggregation job for l1_batch {} and circuit_id {} as queued", l1_batch_number, circuit_id @@ -59,7 +59,7 @@ impl WaitingToQueuedFriWitnessJobMover { .await; let len = l1_batch_numbers.len(); for (l1_batch_number, circuit_id, depth) in l1_batch_numbers { - vlog::info!( + tracing::info!( "Marked fri node aggregation job for l1_batch {} and circuit_id {} depth {} as queued", l1_batch_number, circuit_id, diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs index 9d1bc60de953..8246722a994b 100644 --- a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs @@ -31,7 +31,7 @@ impl WaitingToQueuedWitnessJobMover { .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { - vlog::info!( + tracing::info!( "Marked leaf aggregation job for l1_batch {} as queued", l1_batch_number ); @@ -50,7 +50,7 @@ impl 
WaitingToQueuedWitnessJobMover { .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { - vlog::info!( + tracing::info!( "Marking node aggregation job for l1_batch {} as queued", l1_batch_number ); @@ -69,7 +69,7 @@ impl WaitingToQueuedWitnessJobMover { .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { - vlog::info!( + tracing::info!( "Marking scheduler aggregation job for l1_batch {} as queued", l1_batch_number ); diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index c6d58d346d5d..d91802462bcd 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -57,7 +57,7 @@ impl WitnessGeneratorStatsReporter { fn emit_metrics_for_round(round: AggregationRound, stats: JobCountStatistics) { if stats.queued > 0 || stats.in_progress > 0 { - vlog::trace!( + tracing::trace!( "Found {} free and {} in progress {:?} witness generators jobs", stats.queued, stats.in_progress, @@ -95,7 +95,7 @@ impl PeriodicJob for WitnessGeneratorStatsReporter { } if aggregated.queued > 0 { - vlog::trace!( + tracing::trace!( "Found {} free {} in progress witness generators jobs", aggregated.queued, aggregated.in_progress diff --git a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs index 07e8dcc8f014..0e9bb83aacaf 100644 --- a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/bounded_gas_adjuster.rs @@ -31,7 +31,7 @@ impl L1GasPriceProvider for BoundedGasAdjuster { fn estimate_effective_gas_price(&self) -> u64 { let default_gas_price = self.default_gas_adjuster.estimate_effective_gas_price(); if default_gas_price > self.max_gas_price { - vlog::warn!( + tracing::warn!( "Effective gas price is too high: {}, using max allowed: {}", default_gas_price, self.max_gas_price diff --git a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs index 8abeba5be7cb..e449197e0559 100644 --- a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs @@ -79,19 +79,20 @@ impl GasAdjuster { Ok(()) } - pub async fn run(self: Arc<Self>, stop_receiver: Receiver<bool>) { + pub async fn run(self: Arc<Self>, stop_receiver: Receiver<bool>) -> anyhow::Result<()> { loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, gas_adjuster is shutting down"); + tracing::info!("Stop signal received, gas_adjuster is shutting down"); break; } if let Err(err) = self.keep_updated().await { - vlog::warn!("Cannot add the base fee to gas statistics: {}", err); + tracing::warn!("Cannot add the base fee to gas statistics: {}", err); } tokio::time::sleep(self.config.poll_period()).await; } + Ok(()) } } diff --git a/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs index cdd40f5fa028..2244607a47e8 100644 --- a/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs +++ b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs @@ -43,17 +43,17 @@ impl MainNodeGasPriceFetcher { .expect("Unable to create a main node client") } - pub async fn run(self: Arc<Self>, stop_receiver: Receiver<bool>) { + pub async fn run(self: Arc<Self>,
stop_receiver: Receiver<bool>) -> anyhow::Result<()> { loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, MainNodeGasPriceFetcher is shutting down"); + tracing::info!("Stop signal received, MainNodeGasPriceFetcher is shutting down"); break; } let main_node_gas_price = match self.client.get_l1_gas_price().await { Ok(price) => price, Err(err) => { - vlog::warn!("Unable to get the gas price: {}", err); + tracing::warn!("Unable to get the gas price: {}", err); // A delay to avoid spamming the main node with requests. tokio::time::sleep(SLEEP_INTERVAL).await; continue; @@ -63,6 +63,7 @@ impl MainNodeGasPriceFetcher { .store(main_node_gas_price.as_u64(), Ordering::Relaxed); tokio::time::sleep(SLEEP_INTERVAL).await; } + Ok(()) } } diff --git a/core/bin/zksync_core/src/l1_gas_price/singleton.rs b/core/bin/zksync_core/src/l1_gas_price/singleton.rs index 83faa7c96ffa..6d7b4897b3be 100644 --- a/core/bin/zksync_core/src/l1_gas_price/singleton.rs +++ b/core/bin/zksync_core/src/l1_gas_price/singleton.rs @@ -39,9 +39,10 @@ impl GasAdjusterSingleton { Arc::new(BoundedGasAdjuster::new(config.max_l1_gas_price(), adjuster)) } - pub fn run_if_initialized(self, stop_signal: watch::Receiver<bool>) -> Option<JoinHandle<()>> { - self.0 - .get() - .map(|adjuster| tokio::spawn(adjuster.clone().run(stop_signal))) + pub fn run_if_initialized( + self, + stop_signal: watch::Receiver<bool>, + ) -> Option<JoinHandle<anyhow::Result<()>>> { + Some(tokio::spawn(self.0.get()?.clone().run(stop_signal))) } } diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index 25105507ce62..05b297a9b81e 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -2,10 +2,11 @@ use std::{str::FromStr, sync::Arc, time::Instant}; +use anyhow::Context as _; use futures::channel::oneshot; +use prometheus_exporter::PrometheusExporterConfig; use tokio::{sync::watch, task::JoinHandle}; -use prometheus_exporter::run_prometheus_exporter; use zksync_circuit_breaker::{ facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, @@ -18,8 +19,8 @@ use zksync_config::configs::{ }, database::MerkleTreeMode, house_keeper::HouseKeeperConfig, - FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProofDataHandlerConfig, - ProverGroupConfig, WitnessGeneratorConfig, + FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, + ProofDataHandlerConfig, ProverGroupConfig, WitnessGeneratorConfig, }; use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, FetcherConfig, @@ -31,7 +32,7 @@ use zksync_dal::{ }; use zksync_eth_client::clients::http::QueryClient; use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; -use zksync_health_check::{CheckHealth, ReactiveHealthCheck}; +use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_utils::periodic_job::PeriodicJob; use zksync_queued_job_processor::JobProcessor; @@ -40,7 +41,7 @@ use zksync_types::{ proofs::AggregationRound, protocol_version::{L1VerifierConfig, VerifierParams}, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PackedEthSignature, + Address, L2ChainId, PackedEthSignature, ProtocolVersionId, }; use zksync_verification_key_server::get_cached_commitments; @@ -49,6 +50,8 @@ use crate::api_server::tx_sender::TxSenderConfig; use crate::api_server::tx_sender::{TxSender, TxSenderBuilder}; use
crate::api_server::web3::{state::InternalApiConfig, Namespace}; use crate::eth_sender::{Aggregator, EthTxManager}; +use crate::house_keeper::fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager; +use crate::house_keeper::fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter; use crate::house_keeper::fri_prover_job_retry_manager::FriProverJobRetryManager; use crate::house_keeper::fri_prover_queue_monitor::FriProverStatsReporter; use crate::house_keeper::fri_scheduler_circuit_queuer::SchedulerCircuitQueuer; @@ -106,15 +109,17 @@ pub async fn genesis_init( eth_sender: &ETHSenderConfig, network_config: &NetworkConfig, contracts_config: &ContractsConfig, -) { - let mut storage = StorageProcessor::establish_connection(true).await; +) -> anyhow::Result<()> { + let mut storage = StorageProcessor::establish_connection(true) + .await + .context("establish_connection")?; let operator_address = PackedEthSignature::address_from_private_key( &eth_sender .sender .private_key() - .expect("Private key is required for genesis init"), + .context("Private key is required for genesis init")?, ) - .expect("Failed to restore operator address from private key"); + .context("Failed to restore operator address from private key")?; genesis::ensure_genesis_state( &mut storage, @@ -122,6 +127,7 @@ pub async fn genesis_init( &genesis::GenesisParams { // We consider the operator to be the first validator for now. first_validator: operator_address, + protocol_version: ProtocolVersionId::latest(), base_system_contracts: BaseSystemContracts::load_from_disk(), system_contracts: get_system_smart_contracts(), first_verifier_address: contracts_config.verifier_addr, @@ -137,11 +143,12 @@ pub async fn genesis_init( }, }, ) - .await; + .await?; + Ok(()) } pub async fn is_genesis_needed() -> bool { - let mut storage = StorageProcessor::establish_connection(true).await; + let mut storage = StorageProcessor::establish_connection(true).await.unwrap(); storage.blocks_dal().is_genesis_needed().await } @@ -167,12 +174,15 @@ pub fn setup_sigint_handler() -> oneshot::Receiver<()> { pub enum Component { // Public Web3 API running on HTTP server. HttpApi, + // Public Web3 API running on HTTP/WebSocket server and redirect eth_getLogs to another method. + ApiTranslator, // Public Web3 API (including PubSub) running on WebSocket server. WsApi, // REST API for contract verification. ContractVerificationApi, // Metadata Calculator.
Tree, + // TODO(BFT-273): Remove `TreeLightweight` component as obsolete TreeLightweight, TreeBackup, EthWatcher, @@ -207,6 +217,7 @@ impl FromStr for Components { Component::ContractVerificationApi, ])), "http_api" => Ok(Components(vec![Component::HttpApi])), + "http_api_translator" => Ok(Components(vec![Component::ApiTranslator])), "ws_api" => Ok(Components(vec![Component::WsApi])), "contract_verification_api" => Ok(Components(vec![Component::ContractVerificationApi])), "tree" | "tree_new" => Ok(Components(vec![Component::Tree])), @@ -265,22 +276,29 @@ impl FromStr for Components { pub async fn initialize_components( components: Vec<Component>, - use_prometheus_pushgateway: bool, + use_prometheus_push_gateway: bool, ) -> anyhow::Result<( - Vec<JoinHandle<()>>, + Vec<JoinHandle<anyhow::Result<()>>>, watch::Sender<bool>, oneshot::Receiver<CircuitBreakerError>, HealthCheckHandle, )> { - vlog::info!("Starting the components: {components:?}"); + tracing::info!("Starting the components: {components:?}"); let db_config = DBConfig::from_env(); - let connection_pool = ConnectionPool::builder(DbVariant::Master).build().await; - let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover).build().await; + let connection_pool = ConnectionPool::builder(DbVariant::Master) + .build() + .await + .context("failed to build connection_pool")?; + let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover) + .build() + .await + .context("failed to build prover_connection_pool")?; let replica_connection_pool = ConnectionPool::builder(DbVariant::Replica) .set_statement_timeout(db_config.statement_timeout()) .build() - .await; + .await + .context("failed to build replica_connection_pool")?; let mut healthchecks: Vec<Box<dyn CheckHealth>> = Vec::new(); let contracts_config = ContractsConfig::from_env(); @@ -295,7 +313,8 @@ pub async fn initialize_components( &circuit_breaker_config, main_zksync_contract_address, ) - .await, + .await + .context("circuit_breakers_for_components")?, &circuit_breaker_config, ); circuit_breaker_checker.check().await.unwrap_or_else(|err| { @@ -307,29 +326,45 @@ pub async fn initialize_components( let (stop_sender, stop_receiver) = watch::channel(false); let (cb_sender, cb_receiver) = oneshot::channel();
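The next hunk reworks metrics export: the `run_prometheus_exporter` helper gives way to a `PrometheusExporterConfig` built in either push or pull mode, and the exporter task is wrapped so a `ReactiveHealthCheck` reports it as ready while it runs, with the health updater dropped once the task finishes. A rough sketch of that wrapper pattern, using a `watch` channel as a stand-in for the health updater/check pair (the names and the status enum here are illustrative, not the zksync_health_check API):

```rust
use std::future::Future;

use tokio::{sync::watch, task::JoinHandle};

// Stand-in for zksync_health_check's status type; the real crate is richer.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum HealthStatus {
    NotReady,
    Ready,
    ShutDown,
}

/// Spawn `task`, flipping the health signal to `Ready` once it starts and to
/// `ShutDown` when it finishes (the diff achieves the latter implicitly by
/// dropping the health updater).
fn spawn_with_health<F>(
    task: F,
) -> (JoinHandle<anyhow::Result<()>>, watch::Receiver<HealthStatus>)
where
    F: Future<Output = anyhow::Result<()>> + Send + 'static,
{
    let (updater, health_check) = watch::channel(HealthStatus::NotReady);
    let handle = tokio::spawn(async move {
        updater.send_replace(HealthStatus::Ready);
        let res = task.await;
        updater.send_replace(HealthStatus::ShutDown);
        res
    });
    (handle, health_check)
}
```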
+ // Prometheus exporter and circuit breaker checker should run for every component configuration. let prom_config = PrometheusConfig::from_env(); - let mut task_futures: Vec<JoinHandle<()>> = vec![ - run_prometheus_exporter( - prom_config.listener_port, - use_prometheus_pushgateway.then(|| { - ( - prom_config.pushgateway_url.clone(), - prom_config.push_interval(), - ) - }), - ), + let prom_config = if use_prometheus_push_gateway { + PrometheusExporterConfig::push(prom_config.gateway_endpoint(), prom_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prom_config.listener_port) + }; + let prom_config = prom_config.with_new_facade(); + + let (prometheus_health_check, prometheus_health_updater) = + ReactiveHealthCheck::new("prometheus_exporter"); + healthchecks.push(Box::new(prometheus_health_check)); + let prometheus_task = prom_config.run(stop_receiver.clone()); + let prometheus_task = tokio::spawn(async move { + prometheus_health_updater.update(HealthStatus::Ready.into()); + let res = prometheus_task.await; + drop(prometheus_health_updater); + res + }); + + let mut task_futures: Vec<JoinHandle<anyhow::Result<()>>> = vec![ + prometheus_task, tokio::spawn(circuit_breaker_checker.run(cb_sender, stop_receiver.clone())), ]; if components.contains(&Component::WsApi) || components.contains(&Component::HttpApi) || components.contains(&Component::ContractVerificationApi) + || components.contains(&Component::ApiTranslator) { let api_config = ApiConfig::from_env(); let state_keeper_config = StateKeeperConfig::from_env(); let network_config = NetworkConfig::from_env(); - let tx_sender_config = TxSenderConfig::new(&state_keeper_config, &api_config.web3_json_rpc); + let tx_sender_config = TxSenderConfig::new( + &state_keeper_config, + &api_config.web3_json_rpc, + L2ChainId(network_config.zksync_network_id), + ); let internal_api_config = InternalApiConfig::new( &network_config, &api_config.web3_json_rpc, @@ -349,7 +384,7 @@ pub async fn initialize_components( )); let started_at = Instant::now(); - vlog::info!("initializing HTTP API"); + tracing::info!("initializing HTTP API"); let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await; let (futures, health_check) = run_http_api( &tx_sender_config, @@ -361,12 +396,14 @@ pub async fn initialize_components( stop_receiver.clone(), bounded_gas_adjuster.clone(), state_keeper_config.save_call_traces, + components.contains(&Component::ApiTranslator), storage_caches.clone().unwrap(), ) - .await; + .await + .context("run_http_api")?; task_futures.extend(futures); healthchecks.push(Box::new(health_check)); - vlog::info!("initialized HTTP API in {:?}", started_at.elapsed()); + tracing::info!("initialized HTTP API in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "http_api"); } @@ -376,7 +413,7 @@ pub async fn initialize_components( }); let started_at = Instant::now(); - vlog::info!("initializing WS API"); + tracing::info!("initializing WS API"); let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await; let (futures, health_check) = run_ws_api( &tx_sender_config, @@ -388,24 +425,26 @@ pub async fn initialize_components( replica_connection_pool.clone(), stop_receiver.clone(), storage_caches, + components.contains(&Component::ApiTranslator), ) - .await; + .await + .context("run_ws_api")?; task_futures.extend(futures); healthchecks.push(Box::new(health_check)); - vlog::info!("initialized WS API in {:?}", started_at.elapsed()); + tracing::info!("initialized WS API in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "ws_api"); } if
components.contains(&Component::ContractVerificationApi) { let started_at = Instant::now(); - vlog::info!("initializing contract verification REST API"); + tracing::info!("initializing contract verification REST API"); task_futures.push(contract_verification::start_server_thread_detached( connection_pool.clone(), replica_connection_pool.clone(), api_config.contract_verification.clone(), stop_receiver.clone(), )); - vlog::info!( + tracing::info!( "initialized contract verification REST API in {:?}", started_at.elapsed() ); @@ -415,26 +454,31 @@ pub async fn initialize_components( if components.contains(&Component::StateKeeper) { let started_at = Instant::now(); - vlog::info!("initializing State Keeper"); + tracing::info!("initializing State Keeper"); let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await; add_state_keeper_to_task_futures( &mut task_futures, &contracts_config, StateKeeperConfig::from_env(), + &NetworkConfig::from_env(), &db_config, &MempoolConfig::from_env(), bounded_gas_adjuster, stop_receiver.clone(), ) - .await; - vlog::info!("initialized State Keeper in {:?}", started_at.elapsed()); + .await + .context("add_state_keeper_to_task_futures()")?; + tracing::info!("initialized State Keeper in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "state_keeper"); } if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); - vlog::info!("initializing ETH-Watcher"); - let eth_watch_pool = ConnectionPool::singleton(DbVariant::Master).build().await; + tracing::info!("initializing ETH-Watcher"); + let eth_watch_pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .context("failed to build eth_watch_pool")?; task_futures.push( start_eth_watch( eth_watch_pool, @@ -444,15 +488,23 @@ pub async fn initialize_components( ) .await, ); - vlog::info!("initialized ETH-Watcher in {:?}", started_at.elapsed()); + tracing::info!("initialized ETH-Watcher in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_watcher"); } + let store_factory = ObjectStoreFactory::from_env(); + if components.contains(&Component::EthTxAggregator) { let started_at = Instant::now(); - vlog::info!("initializing ETH-TxAggregator"); - let eth_sender_pool = ConnectionPool::singleton(DbVariant::Master).build().await; - let eth_sender_prover_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; + tracing::info!("initializing ETH-TxAggregator"); + let eth_sender_pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .context("failed to build eth_sender_pool")?; + let eth_sender_prover_pool = ConnectionPool::singleton(DbVariant::Prover) + .build() + .await + .context("failed to build eth_sender_prover_pool")?; let eth_sender = ETHSenderConfig::from_env(); let eth_client = PKSigningClient::from_config(&eth_sender, &contracts_config, &eth_client_config); let nonce = eth_client.pending_nonce("eth_sender").await.unwrap(); let eth_tx_aggregator_actor = EthTxAggregator::new( eth_sender.sender.clone(), - Aggregator::new(eth_sender.sender.clone()), + Aggregator::new( + eth_sender.sender.clone(), + store_factory.create_store().await, + ), contracts_config.validator_timelock_addr, contracts_config.l1_multicall3_addr, main_zksync_contract_address, @@ -472,14 +527,17 @@ pub async fn initialize_components( eth_client, stop_receiver.clone(), ))); - vlog::info!("initialized ETH-TxAggregator in {:?}", started_at.elapsed()); + tracing::info!("initialized ETH-TxAggregator in
{:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); } if components.contains(&Component::EthTxManager) { let started_at = Instant::now(); - vlog::info!("initializing ETH-TxManager"); - let eth_manager_pool = ConnectionPool::singleton(DbVariant::Master).build().await; + tracing::info!("initializing ETH-TxManager"); + let eth_manager_pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .context("failed to build eth_manager_pool")?; let eth_sender = ETHSenderConfig::from_env(); let eth_client = PKSigningClient::from_config(ð_sender, &contracts_config, ð_client_config); @@ -491,7 +549,7 @@ pub async fn initialize_components( task_futures.extend([tokio::spawn( eth_tx_manager_actor.run(eth_manager_pool, stop_receiver.clone()), )]); - vlog::info!("initialized ETH-TxManager in {:?}", started_at.elapsed()); + tracing::info!("initialized ETH-TxManager in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); } @@ -499,18 +557,17 @@ pub async fn initialize_components( let started_at = Instant::now(); let fetcher_config = FetcherConfig::from_env(); let eth_network = chain::NetworkConfig::from_env(); - vlog::info!("initializing data fetchers"); + tracing::info!("initializing data fetchers"); task_futures.extend(run_data_fetchers( &fetcher_config, eth_network.network, connection_pool.clone(), stop_receiver.clone(), )); - vlog::info!("initialized data fetchers in {:?}", started_at.elapsed()); + tracing::info!("initialized data fetchers in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "data_fetchers"); } - let store_factory = ObjectStoreFactory::from_env(); add_trees_to_task_futures( &mut task_futures, &mut healthchecks, @@ -518,7 +575,8 @@ pub async fn initialize_components( &store_factory, stop_receiver.clone(), ) - .await; + .await + .context("add_trees_to_task_futures()")?; add_witness_generator_to_task_futures( &mut task_futures, &components, @@ -530,7 +588,9 @@ pub async fn initialize_components( .await; if components.contains(&Component::Housekeeper) { - add_house_keeper_to_task_futures(&mut task_futures, &store_factory).await; + add_house_keeper_to_task_futures(&mut task_futures, &store_factory) + .await + .context("add_house_keeper_to_task_futures()")?; } if components.contains(&Component::ProofDataHandler) { @@ -557,18 +617,23 @@ pub async fn initialize_components( Ok((task_futures, stop_sender, cb_receiver, health_check_handle)) } +#[allow(clippy::too_many_arguments)] async fn add_state_keeper_to_task_futures( - task_futures: &mut Vec>, + task_futures: &mut Vec>>, contracts_config: &ContractsConfig, state_keeper_config: StateKeeperConfig, + network_config: &NetworkConfig, db_config: &DBConfig, mempool_config: &MempoolConfig, gas_adjuster: Arc, stop_receiver: watch::Receiver, -) { +) -> anyhow::Result<()> { let fair_l2_gas_price = state_keeper_config.fair_l2_gas_price; let pool_builder = ConnectionPool::singleton(DbVariant::Master); - let state_keeper_pool = pool_builder.build().await; + let state_keeper_pool = pool_builder + .build() + .await + .context("failed to build state_keeper_pool")?; let next_priority_id = state_keeper_pool .access_storage() .await @@ -577,7 +642,10 @@ async fn add_state_keeper_to_task_futures>, + task_futures: &mut Vec>>, healthchecks: &mut Vec>, components: &[Component], store_factory: &ObjectStoreFactory, stop_receiver: watch::Receiver, -) { +) -> 
anyhow::Result<()> { if components.contains(&Component::TreeBackup) { panic!("Tree backup mode is disabled"); } @@ -636,12 +709,14 @@ async fn add_trees_to_task_futures( MerkleTreeMode::Lightweight => MetadataCalculatorModeConfig::Lightweight, MerkleTreeMode::Full => MetadataCalculatorModeConfig::Full { store_factory }, }, - (false, false) => return, + (false, false) => return Ok(()), }; - let (future, tree_health_check) = - run_tree(&db_config, &operation_config, mode, stop_receiver).await; + let (future, tree_health_check) = run_tree(&db_config, &operation_config, mode, stop_receiver) + .await + .context("run_tree()")?; task_futures.push(future); healthchecks.push(Box::new(tree_health_check)); + Ok(()) } async fn run_tree( @@ -649,34 +724,40 @@ operation_manager: &OperationsManagerConfig, mode: MetadataCalculatorModeConfig<'_>, stop_receiver: watch::Receiver<bool>, -) -> (JoinHandle<()>, ReactiveHealthCheck) { +) -> anyhow::Result<(JoinHandle<anyhow::Result<()>>, ReactiveHealthCheck)> { let started_at = Instant::now(); let mode_str = if matches!(mode, MetadataCalculatorModeConfig::Full { .. }) { "full" } else { "lightweight" }; - vlog::info!("Initializing Merkle tree in {mode_str} mode"); + tracing::info!("Initializing Merkle tree in {mode_str} mode"); let config = MetadataCalculatorConfig::for_main_node(config, operation_manager, mode); let metadata_calculator = MetadataCalculator::new(&config).await; let tree_health_check = metadata_calculator.tree_health_check(); - let pool = ConnectionPool::singleton(DbVariant::Master).build().await; - let prover_pool = ConnectionPool::singleton(DbVariant::Prover).build().await; + let pool = ConnectionPool::singleton(DbVariant::Master) + .build() + .await + .context("failed to build connection pool")?; + let prover_pool = ConnectionPool::singleton(DbVariant::Prover) + .build() + .await + .context("failed to build prover_pool")?; let future = tokio::spawn(metadata_calculator.run(pool, prover_pool, stop_receiver)); - vlog::info!("Initialized {mode_str} tree in {:?}", started_at.elapsed()); + tracing::info!("Initialized {mode_str} tree in {:?}", started_at.elapsed()); metrics::gauge!( "server.init.latency", started_at.elapsed(), "stage" => "tree", "tree" => mode_str ); - (future, tree_health_check) + Ok((future, tree_health_check)) } async fn add_witness_generator_to_task_futures( - task_futures: &mut Vec<JoinHandle<()>>, + task_futures: &mut Vec<JoinHandle<anyhow::Result<()>>>, components: &[Component], connection_pool: &ConnectionPool, prover_connection_pool: &ConnectionPool, @@ -696,20 +777,19 @@ } }); - let vk_commitments = get_cached_commitments(); - let protocol_versions = prover_connection_pool - .access_storage() - .await - .protocol_versions_dal() - .protocol_version_for(&vk_commitments) - .await; - for (batch_size, component_type) in generator_params { let started_at = Instant::now(); - vlog::info!( + tracing::info!( "initializing the {component_type:?} witness generator, batch size: {batch_size:?}" ); + let vk_commitments = get_cached_commitments(); + let protocol_versions = prover_connection_pool + .access_storage() + .await + .protocol_versions_dal() + .protocol_version_for(&vk_commitments) + .await; let config = WitnessGeneratorConfig::from_env(); let task = match component_type { AggregationRound::BasicCircuits => { @@ -759,7 +839,7 @@ }; task_futures.push(task); - vlog::info!( + tracing::info!( "initialized {component_type:?} witness generator in {:?}", started_at.elapsed() ); @@ -772,11
+852,14 @@ async fn add_witness_generator_to_task_futures( } async fn add_house_keeper_to_task_futures( - task_futures: &mut Vec<JoinHandle<()>>, + task_futures: &mut Vec<JoinHandle<anyhow::Result<()>>>, store_factory: &ObjectStoreFactory, -) { +) -> anyhow::Result<()> { let house_keeper_config = HouseKeeperConfig::from_env(); - let connection_pool = ConnectionPool::singleton(DbVariant::Replica).build().await; + let connection_pool = ConnectionPool::singleton(DbVariant::Replica) + .build() + .await + .context("failed to build a connection pool")?; let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( house_keeper_config.l1_batch_metrics_reporting_interval_ms, connection_pool, @@ -785,7 +868,8 @@ async fn add_house_keeper_to_task_futures( let prover_connection_pool = ConnectionPool::builder(DbVariant::Prover) .set_max_size(Some(house_keeper_config.prover_db_pool_size)) .build() - .await; + .await + .context("failed to build a prover_connection_pool")?; let gpu_prover_queue = GpuProverQueueMonitor::new( ProverGroupConfig::from_env().synthesizer_per_gpu, house_keeper_config.gpu_prover_queue_reporting_interval_ms, @@ -867,11 +951,27 @@ async fn add_house_keeper_to_task_futures( prover_connection_pool.clone(), ); task_futures.push(tokio::spawn(fri_prover_stats_reporter.run())); + + let proof_compressor_config = FriProofCompressorConfig::from_env(); + let fri_proof_compressor_stats_reporter = FriProofCompressorStatsReporter::new( + house_keeper_config.fri_proof_compressor_stats_reporting_interval_ms, + prover_connection_pool.clone(), + ); + task_futures.push(tokio::spawn(fri_proof_compressor_stats_reporter.run())); + + let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( + proof_compressor_config.max_attempts, + proof_compressor_config.generation_timeout(), + house_keeper_config.fri_proof_compressor_job_retrying_interval_ms, + prover_connection_pool.clone(), + ); + task_futures.push(tokio::spawn(fri_proof_compressor_retry_manager.run())); + Ok(()) } fn build_storage_caches( replica_connection_pool: &ConnectionPool, - task_futures: &mut Vec<JoinHandle<()>>, + task_futures: &mut Vec<JoinHandle<anyhow::Result<()>>>, ) -> PostgresStorageCaches { let rpc_config = Web3JsonRpcConfig::from_env(); let factory_deps_capacity = rpc_config.factory_deps_cache_size() as u64; @@ -934,8 +1034,9 @@ async fn run_http_api( stop_receiver: watch::Receiver<bool>, gas_adjuster: Arc, with_debug_namespace: bool, + with_logs_request_translator_enabled: bool, storage_caches: PostgresStorageCaches, -) -> (Vec<JoinHandle<()>>, ReactiveHealthCheck) { +) -> anyhow::Result<(Vec<JoinHandle<anyhow::Result<()>>>, ReactiveHealthCheck)> { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, @@ -952,17 +1053,25 @@ async fn run_http_api( } else { Namespace::NON_DEBUG.to_vec() }; - - web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) - .http(api_config.web3_json_rpc.http_port) - .with_filter_limit(api_config.web3_json_rpc.filters_limit()) - .with_threads(api_config.web3_json_rpc.http_server_threads()) - .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) - .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) - .with_tx_sender(tx_sender, vm_barrier) - .enable_api_namespaces(namespaces) - .build(stop_receiver.clone()) + let last_miniblock_pool = ConnectionPool::singleton(DbVariant::Replica) + .build() .await + .context("failed to build last_miniblock_pool")?; + + let mut api_builder = + web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool) + .http(api_config.web3_json_rpc.http_port)
.with_last_miniblock_pool(last_miniblock_pool) + .with_filter_limit(api_config.web3_json_rpc.filters_limit()) + .with_threads(api_config.web3_json_rpc.http_server_threads()) + .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) + .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) + .with_tx_sender(tx_sender, vm_barrier) + .enable_api_namespaces(namespaces); + if with_logs_request_translator_enabled { + api_builder = api_builder.enable_request_translator(); + } + Ok(api_builder.build(stop_receiver.clone()).await) } #[allow(clippy::too_many_arguments)] async fn run_ws_api( @@ -976,7 +1085,8 @@ async fn run_ws_api( replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver<bool>, storage_caches: PostgresStorageCaches, -) -> (Vec<JoinHandle<()>>, ReactiveHealthCheck) { + with_logs_request_translator_enabled: bool, +) -> anyhow::Result<(Vec<JoinHandle<anyhow::Result<()>>>, ReactiveHealthCheck)> { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, @@ -987,19 +1097,33 @@ async fn run_ws_api( storage_caches, ) .await; - - web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool) - .ws(api_config.web3_json_rpc.ws_port) - .with_filter_limit(api_config.web3_json_rpc.filters_limit()) - .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit()) - .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) - .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) - .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) - .with_threads(api_config.web3_json_rpc.ws_server_threads()) - .with_tx_sender(tx_sender, vm_barrier) - .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()) - .build(stop_receiver.clone()) + let last_miniblock_pool = ConnectionPool::singleton(DbVariant::Replica) + .build() .await + .context("failed to build last_miniblock_pool")?; + + let mut api_builder = + web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool) + .ws(api_config.web3_json_rpc.ws_port) + .with_last_miniblock_pool(last_miniblock_pool) + .with_filter_limit(api_config.web3_json_rpc.filters_limit()) + .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit()) + .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) + .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) + .with_websocket_requests_per_minute_limit( + api_config + .web3_json_rpc + .websocket_requests_per_minute_limit(), + ) + .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) + .with_threads(api_config.web3_json_rpc.ws_server_threads()) + .with_tx_sender(tx_sender, vm_barrier) + .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()); + + if with_logs_request_translator_enabled { + api_builder = api_builder.enable_request_translator(); + } + Ok(api_builder.build(stop_receiver.clone()).await) } async fn circuit_breakers_for_components( @@ -1007,7 +1131,7 @@ async fn circuit_breakers_for_components( web3_url: &str, circuit_breaker_config: &CircuitBreakerConfig, main_contract: Address, -) -> Vec<Box<dyn CircuitBreaker>> { +) -> anyhow::Result<Vec<Box<dyn CircuitBreaker>>> { let mut circuit_breakers: Vec<Box<dyn CircuitBreaker>> = Vec::new(); if components.iter().any(|c| { @@ -1016,7 +1140,10 @@ matches!( Component::EthTxAggregator | Component::EthTxManager | Component::StateKeeper ) }) { - let pool = ConnectionPool::singleton(DbVariant::Replica).build().await; + let pool = ConnectionPool::singleton(DbVariant::Replica) + .build() + .await + .context("failed to
build a connection pool")?; circuit_breakers.push(Box::new(FailedL1TransactionChecker { pool })); } @@ -1032,7 +1159,7 @@ async fn circuit_breakers_for_components( ))); } - circuit_breakers + Ok(circuit_breakers) } #[tokio::test] @@ -1042,5 +1169,5 async fn test_house_keeper_components_get_added() { .unwrap(); // circuit-breaker, prometheus-exporter components are run, irrespective of other components. let always_running_component_count = 2; - assert_eq!(13, core_task_handles.len() - always_running_component_count); + assert_eq!(15, core_task_handles.len() - always_running_component_count); } diff --git a/core/bin/zksync_core/src/metadata_calculator/helpers.rs b/core/bin/zksync_core/src/metadata_calculator/helpers.rs index 925adbe50531..6269c6c3d25a 100644 --- a/core/bin/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/bin/zksync_core/src/metadata_calculator/helpers.rs @@ -57,7 +57,7 @@ impl AsyncTree { multi_get_chunk_size: usize, block_cache_capacity: usize, ) -> Self { - vlog::info!( + tracing::info!( "Initializing Merkle tree at `{db_path}` with {multi_get_chunk_size} multi-get chunk size, \ {block_cache_capacity}B block cache", db_path = db_path.display() ); @@ -179,7 +179,7 @@ impl L1BatchWithLogs { storage: &mut StorageProcessor<'_>, l1_batch_number: L1BatchNumber, ) -> Option<Self> { - vlog::debug!("Loading storage logs data for L1 batch #{l1_batch_number}"); + tracing::debug!("Loading storage logs data for L1 batch #{l1_batch_number}"); let load_changes_latency = TreeUpdateStage::LoadChanges.start(); let header_latency = LoadChangesStage::L1BatchHeader.start(); @@ -214,7 +214,7 @@ impl L1BatchWithLogs { // ^ The tree doesn't use the read value, so we set it to zero. storage_logs.insert(storage_key, log); } - vlog::debug!( + tracing::debug!( "Made touched slots disjoint with protective reads; remaining touched slots: {}", touched_slots.len() ); @@ -278,8 +278,8 @@ mod tests { use zksync_dal::ConnectionPool; use zksync_types::{ proofs::PrepareBasicCircuitsJob, protocol_version::L1VerifierConfig, - system_contracts::get_system_smart_contracts, Address, L2ChainId, StorageKey, - StorageLogKind, + system_contracts::get_system_smart_contracts, Address, L2ChainId, ProtocolVersionId, + StorageKey, StorageLogKind, }; use super::*; @@ -352,6 +352,7 @@ mod tests { fn mock_genesis_params() -> GenesisParams { GenesisParams { first_validator: Address::repeat_byte(0x01), + protocol_version: ProtocolVersionId::latest(), base_system_contracts: BaseSystemContracts::load_from_disk(), system_contracts: get_system_smart_contracts(), first_l1_verifier_config: L1VerifierConfig::default(), @@ -366,7 +367,8 @@ mod tests { L2ChainId(270), &mock_genesis_params(), ) - .await; + .await + .unwrap(); reset_db_state(&pool, 5).await; let mut storage = pool.access_storage().await; @@ -385,7 +387,9 @@ #[db_test] async fn loaded_logs_equivalence_with_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await; - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; + ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + .await + .unwrap(); let mut logs = gen_storage_logs(100..200, 2); for log in &mut logs[0] { @@ -461,7 +465,9 @@ #[db_test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await; - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; + ensure_genesis_state(&mut storage, L2ChainId(270),
&mock_genesis_params()) + .await + .unwrap(); let mut logs = gen_storage_logs(100..120, 1); // Entire batch of no-op logs (writing previous values). @@ -506,7 +512,9 @@ #[db_test] async fn loaded_logs_equivalence_with_protective_reads(pool: ConnectionPool) { let mut storage = pool.access_storage().await; - ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()).await; + ensure_genesis_state(&mut storage, L2ChainId(270), &mock_genesis_params()) + .await + .unwrap(); let mut logs = gen_storage_logs(100..120, 1); let logs_copy = logs[0].clone(); diff --git a/core/bin/zksync_core/src/metadata_calculator/metrics.rs b/core/bin/zksync_core/src/metadata_calculator/metrics.rs index 510edad786fc..8b7477472f54 100644 --- a/core/bin/zksync_core/src/metadata_calculator/metrics.rs +++ b/core/bin/zksync_core/src/metadata_calculator/metrics.rs @@ -99,11 +99,11 @@ impl UpdateTreeLatency { metrics::histogram!(S::HISTOGRAM_NAME, elapsed, "stage" => stage); if let Some(record_count) = record_count { - vlog::debug!( + tracing::debug!( "Metadata calculator stage `{stage}` with {record_count} records completed in {elapsed:?}" ); } else { - vlog::debug!("Metadata calculator stage `{stage}` completed in {elapsed:?}"); + tracing::debug!("Metadata calculator stage `{stage}` completed in {elapsed:?}"); } } } @@ -154,7 +154,7 @@ impl MetadataCalculator { let first_batch_number = batch_headers.first().unwrap().number.0; let last_batch_number = batch_headers.last().unwrap().number.0; - vlog::info!( + tracing::info!( "L1 batches #{:?} processed in tree", first_batch_number..=last_batch_number ); diff --git a/core/bin/zksync_core/src/metadata_calculator/mod.rs b/core/bin/zksync_core/src/metadata_calculator/mod.rs index ecfeb20dbe59..b642ba968c4c 100644 --- a/core/bin/zksync_core/src/metadata_calculator/mod.rs +++ b/core/bin/zksync_core/src/metadata_calculator/mod.rs @@ -100,6 +100,7 @@ pub struct MetadataCalculator { impl MetadataCalculator { /// Creates a calculator with the specified `config`. pub async fn new(config: &MetadataCalculatorConfig<'_>) -> Self { + // TODO (SMA-1726): restore the tree from backup if appropriate let mode = config.mode.to_mode(); let object_store = match config.mode { @@ -127,15 +128,16 @@ impl MetadataCalculator { pool: ConnectionPool, prover_pool: ConnectionPool, stop_receiver: watch::Receiver<bool>, - ) { - let update_task = self.updater.loop_updating_tree( - self.delayer, - &pool, - &prover_pool, - stop_receiver, - self.health_updater, - ); - update_task.await; + ) -> anyhow::Result<()> { + self.updater + .loop_updating_tree( + self.delayer, + &pool, + &prover_pool, + stop_receiver, + self.health_updater, + ) + .await } /// This is used to improve L1 gas estimation for the commit operation.
The estimations are computed @@ -176,7 +178,7 @@ impl MetadataCalculator { header.base_system_contracts_hashes.default_aa, ); let commitment_hash = commitment.hash(); - vlog::trace!("L1 batch commitment: {commitment:?}"); + tracing::trace!("L1 batch commitment: {commitment:?}"); let metadata = L1BatchMetadata { root_hash: merkle_root_hash, @@ -193,7 +195,9 @@ impl MetadataCalculator { pass_through_data_hash: commitment_hash.pass_through_data, }; - vlog::trace!("L1 batch metadata: {metadata:?}"); + tracing::trace!("L1 batch metadata: {metadata:?}"); metadata } + + // TODO (SMA-1726): Integrate tree backup mode } diff --git a/core/bin/zksync_core/src/metadata_calculator/tests.rs b/core/bin/zksync_core/src/metadata_calculator/tests.rs index b216a81c8f23..42c182477a32 100644 --- a/core/bin/zksync_core/src/metadata_calculator/tests.rs +++ b/core/bin/zksync_core/src/metadata_calculator/tests.rs @@ -13,14 +13,14 @@ use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{ - block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, + block::{miniblock_hash, BlockGasCount, L1BatchHeader, MiniblockHeader}, proofs::PrepareBasicCircuitsJob, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, - AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageKey, StorageLog, - H256, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, + StorageKey, StorageLog, H256, }; -use zksync_utils::{miniblock_hash, u32_to_h256}; +use zksync_utils::u32_to_h256; use super::{ L1BatchWithLogs, MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, @@ -53,6 +53,7 @@ async fn genesis_creation(pool: ConnectionPool, prover_pool: ConnectionPool) { ); } +// TODO (SMA-1726): Restore tests for tree backup mode #[db_test] async fn basic_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { @@ -127,6 +128,7 @@ async fn status_receiver_has_correct_states(pool: ConnectionPool, prover_pool: C tokio::time::timeout(RUN_TIMEOUT, calculator_handle) .await .expect("timed out waiting for calculator") + .unwrap() .unwrap(); assert_eq!( tree_health_check.check_health().await.status(), @@ -215,6 +217,7 @@ async fn running_metadata_calculator_with_additional_blocks( tokio::time::timeout(RUN_TIMEOUT, calculator_handle) .await .expect("timed out waiting for calculator") + .unwrap() .unwrap(); // Switch to the full tree. It should pick up from the same spot and result in the same tree root hash. 
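The extra `.unwrap()` added throughout these tests falls out of the same signature change: `MetadataCalculator::run` now returns `anyhow::Result<()>`, so awaiting the calculator's `JoinHandle` under a timeout yields three nested `Result` layers. A small standalone illustration (the spawned closure stands in for the calculator task; `RUN_TIMEOUT` mirrors the tests' constant):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    const RUN_TIMEOUT: Duration = Duration::from_secs(30);
    // Stand-in for the calculator: like `MetadataCalculator::run` after this
    // PR, the spawned future resolves to `anyhow::Result<()>`.
    let calculator_handle = tokio::spawn(async { anyhow::Ok(()) });
    tokio::time::timeout(RUN_TIMEOUT, calculator_handle)
        .await
        .expect("timed out waiting for calculator") // Err: RUN_TIMEOUT elapsed
        .unwrap() // Err: the task panicked or was aborted (JoinError)
        .unwrap(); // Err: the task itself returned an error
}
```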
@@ -245,6 +248,7 @@ async fn shutting_down_calculator(pool: ConnectionPool, prover_pool: ConnectionP stop_sx.send_replace(true); run_with_timeout(RUN_TIMEOUT, calculator_task) .await + .unwrap() .unwrap(); } @@ -320,6 +324,7 @@ async fn test_postgres_backup_recovery( tokio::time::timeout(RUN_TIMEOUT, calculator_handle) .await .expect("timed out waiting for calculator") + .unwrap() .unwrap(); } @@ -386,6 +391,7 @@ async fn setup_calculator_with_options( let mut storage = pool.access_storage().await; if storage.blocks_dal().is_genesis_needed().await { let chain_id = L2ChainId(270); + let protocol_version = ProtocolVersionId::latest(); let base_system_contracts = BaseSystemContracts::load_from_disk(); let system_contracts = get_system_smart_contracts(); let first_validator = Address::repeat_byte(0x01); @@ -396,13 +402,15 @@ async fn setup_calculator_with_options( chain_id, &GenesisParams { first_validator, + protocol_version, base_system_contracts, system_contracts, first_l1_verifier_config, first_verifier_address, }, ) - .await; + .await + .unwrap(); } metadata_calculator } @@ -430,7 +438,9 @@ async fn run_calculator( root_hash }); - run_with_timeout(RUN_TIMEOUT, calculator.run(pool, prover_pool, stop_rx)).await; + run_with_timeout(RUN_TIMEOUT, calculator.run(pool, prover_pool, stop_rx)) + .await + .unwrap(); delayer_handle.await.unwrap() } @@ -477,7 +487,12 @@ pub(super) async fn extend_db_state( let miniblock_header = MiniblockHeader { number: miniblock_number, timestamp: header.timestamp, - hash: miniblock_hash(miniblock_number), + hash: miniblock_hash( + miniblock_number, + header.timestamp, + H256::zero(), + H256::zero(), + ), l1_tx_count: header.l1_tx_count, l2_tx_count: header.l2_tx_count, base_fee_per_gas: header.base_fee_per_gas, @@ -485,6 +500,7 @@ pub(super) async fn extend_db_state( l2_fair_gas_price: 0, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(Default::default()), + virtual_blocks: 0, }; storage @@ -603,6 +619,7 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { let mut storage = pool.access_storage().await; let first_validator = Address::repeat_byte(0x01); + let protocol_version = ProtocolVersionId::latest(); let base_system_contracts = BaseSystemContracts::load_from_disk(); let system_contracts = get_system_smart_contracts(); let first_l1_verifier_config = L1VerifierConfig::default(); @@ -611,6 +628,7 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { &mut storage, L2ChainId(270), &GenesisParams { + protocol_version, first_validator, base_system_contracts, system_contracts, @@ -618,7 +636,8 @@ async fn deduplication_works_as_expected(pool: ConnectionPool) { first_verifier_address, }, ) - .await; + .await + .unwrap(); let logs = gen_storage_logs(100..120, 1).pop().unwrap(); let hashed_keys: Vec<_> = logs.iter().map(|log| log.key.hashed_key()).collect(); diff --git a/core/bin/zksync_core/src/metadata_calculator/updater.rs b/core/bin/zksync_core/src/metadata_calculator/updater.rs index 21d937f846e2..972e495c2ff9 100644 --- a/core/bin/zksync_core/src/metadata_calculator/updater.rs +++ b/core/bin/zksync_core/src/metadata_calculator/updater.rs @@ -1,5 +1,5 @@ //! Tree updater trait and its implementations. 
- +use anyhow::Context as _; use futures::{future, FutureExt}; use tokio::sync::watch; @@ -78,7 +78,7 @@ impl TreeUpdater { .unwrap(); save_witnesses_latency.report(); - vlog::info!( + tracing::info!( "Saved witnesses for L1 batch #{l1_batch_number} to object storage at `{object_key}`" ); Some(object_key) @@ -107,7 +107,7 @@ impl TreeUpdater { l1_batch_numbers: ops::RangeInclusive, ) -> L1BatchNumber { let start = Instant::now(); - vlog::info!("Processing L1 batches #{l1_batch_numbers:?}"); + tracing::info!("Processing L1 batches #{l1_batch_numbers:?}"); let first_l1_batch_number = L1BatchNumber(*l1_batch_numbers.start()); let last_l1_batch_number = L1BatchNumber(*l1_batch_numbers.end()); let mut l1_batch_data = L1BatchWithLogs::new(storage, first_l1_batch_number).await; @@ -156,17 +156,38 @@ impl TreeUpdater { // right away without having to implement dedicated code. if let Some(object_key) = &object_key { + let protocol_version_id = storage + .blocks_dal() + .get_batch_protocol_version_id(l1_batch_number) + .await; + if let Some(id) = protocol_version_id { + if !prover_storage + .protocol_versions_dal() + .prover_protocol_version_exists(id) + .await + { + let protocol_version = storage + .protocol_versions_dal() + .get_protocol_version(id) + .await + .unwrap(); + prover_storage + .protocol_versions_dal() + .save_prover_protocol_version(protocol_version) + .await; + } + } prover_storage .witness_generator_dal() - .save_witness_inputs(l1_batch_number, object_key) + .save_witness_inputs(l1_batch_number, object_key, protocol_version_id) .await; - prover_storage - .fri_witness_generator_dal() - .save_witness_inputs(l1_batch_number, object_key) + storage + .proof_generation_dal() + .insert_proof_generation_details(l1_batch_number, object_key) .await; } save_postgres_latency.report(); - vlog::info!("Updated metadata for L1 batch #{l1_batch_number} in Postgres"); + tracing::info!("Updated metadata for L1 batch #{l1_batch_number} in Postgres"); previous_root_hash = metadata.merkle_root_hash; updated_headers.push(header); @@ -193,11 +214,11 @@ impl TreeUpdater { let last_requested_l1_batch = last_requested_l1_batch.min(last_sealed_l1_batch.0); let l1_batch_numbers = next_l1_batch_to_seal.0..=last_requested_l1_batch; if l1_batch_numbers.is_empty() { - vlog::trace!( + tracing::trace!( "No L1 batches to seal: batch numbers range to be loaded {l1_batch_numbers:?} is empty" ); } else { - vlog::info!("Updating Merkle tree with L1 batches #{l1_batch_numbers:?}"); + tracing::info!("Updating Merkle tree with L1 batches #{l1_batch_numbers:?}"); *next_l1_batch_to_seal = self .process_multiple_batches(&mut storage, &mut prover_storage, l1_batch_numbers) .await; @@ -212,15 +233,15 @@ impl TreeUpdater { prover_pool: &ConnectionPool, mut stop_receiver: watch::Receiver, health_updater: HealthUpdater, - ) { + ) -> anyhow::Result<()> { let mut storage = pool.access_storage_tagged("metadata_calculator").await; // Ensure genesis creation let tree = &mut self.tree; if tree.is_empty() { - let Some(logs) = L1BatchWithLogs::new(&mut storage, L1BatchNumber(0)).await else { - panic!("Missing storage logs for the genesis L1 batch"); - }; + let logs = L1BatchWithLogs::new(&mut storage, L1BatchNumber(0)) + .await + .context("Missing storage logs for the genesis L1 batch")?; tree.process_l1_batch(logs.storage_logs).await; tree.save().await; } @@ -233,7 +254,7 @@ impl TreeUpdater { .await; drop(storage); - vlog::info!( + tracing::info!( "Initialized metadata calculator with {max_batches_per_iter} max L1 batches per iteration. 
\ Next L1 batch for Merkle tree: {next_l1_batch_to_seal}, current Postgres L1 batch: {current_db_batch}, \ last L1 batch with metadata: {last_l1_batch_with_metadata}", @@ -252,11 +273,11 @@ impl TreeUpdater { if next_l1_batch_to_seal > last_l1_batch_with_metadata + 1 { // Check stop signal before proceeding with a potentially time-consuming operation. if *stop_receiver.borrow_and_update() { - vlog::info!("Stop signal received, metadata_calculator is shutting down"); - return; + tracing::info!("Stop signal received, metadata_calculator is shutting down"); + return Ok(()); } - vlog::warn!( + tracing::warn!( "Next L1 batch of the tree ({next_l1_batch_to_seal}) is greater than last L1 batch with metadata in Postgres \ ({last_l1_batch_with_metadata}); this may be a result of restoring Postgres from a snapshot. \ Truncating Merkle tree versions so that this mismatch is fixed..." @@ -264,7 +285,7 @@ impl TreeUpdater { tree.revert_logs(last_l1_batch_with_metadata); tree.save().await; next_l1_batch_to_seal = tree.next_l1_batch_number(); - vlog::info!("Truncated Merkle tree to L1 batch #{next_l1_batch_to_seal}"); + tracing::info!("Truncated Merkle tree to L1 batch #{next_l1_batch_to_seal}"); let health = TreeHealthCheckDetails { mode: self.mode, @@ -275,7 +296,7 @@ impl TreeUpdater { loop { if *stop_receiver.borrow_and_update() { - vlog::info!("Stop signal received, metadata_calculator is shutting down"); + tracing::info!("Stop signal received, metadata_calculator is shutting down"); break; } let storage = pool.access_storage_tagged("metadata_calculator").await; @@ -287,7 +308,7 @@ impl TreeUpdater { self.step(storage, prover_storage, &mut next_l1_batch_to_seal) .await; let delay = if snapshot == *next_l1_batch_to_seal { - vlog::trace!( + tracing::trace!( "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) \ didn't make any progress; delaying it using {delayer:?}" ); @@ -299,7 +320,7 @@ impl TreeUpdater { }; health_updater.update(health.into()); - vlog::trace!( + tracing::trace!( "Metadata calculator (next L1 batch: #{next_l1_batch_to_seal}) made progress from #{snapshot}" ); future::ready(()).right_future() @@ -309,13 +330,14 @@ impl TreeUpdater { // and the stop receiver still allows to be more responsive during shutdown. tokio::select! 
{ _ = stop_receiver.changed() => { - vlog::info!("Stop signal received, metadata_calculator is shutting down"); + tracing::info!("Stop signal received, metadata_calculator is shutting down"); break; } () = delay => { /* The delay has passed */ } } } drop(health_updater); // Explicitly mark where the updater should be dropped + Ok(()) } async fn check_initial_writes_consistency( @@ -323,22 +345,18 @@ impl TreeUpdater { l1_batch_number: L1BatchNumber, tree_initial_writes: &[InitialStorageWrite], ) { - let pg_initial_writes: Vec<_> = connection + let pg_initial_writes = connection .storage_logs_dedup_dal() .initial_writes_for_batch(l1_batch_number) .await; - let pg_initial_writes: Option> = pg_initial_writes + let pg_initial_writes: Vec<_> = pg_initial_writes .into_iter() .map(|(key, index)| { let key = U256::from_little_endian(key.as_bytes()); - Some((key, index?)) + (key, index) }) .collect(); - let Some(pg_initial_writes) = pg_initial_writes else { - vlog::info!("Skipping indices consistency check as they are missing in Postgres for L1 batch {l1_batch_number}"); - return; - }; let tree_initial_writes: Vec<_> = tree_initial_writes .iter() diff --git a/core/bin/zksync_core/src/proof_data_handler/mod.rs b/core/bin/zksync_core/src/proof_data_handler/mod.rs index 7c9c02bbad4a..f74983eadc01 100644 --- a/core/bin/zksync_core/src/proof_data_handler/mod.rs +++ b/core/bin/zksync_core/src/proof_data_handler/mod.rs @@ -1,28 +1,50 @@ +use crate::proof_data_handler::request_processor::RequestProcessor; +use anyhow::Context as _; use axum::extract::Path; use axum::{routing::post, Json, Router}; use std::net::SocketAddr; use tokio::sync::watch; - -use zksync_config::configs::ProofDataHandlerConfig; +use zksync_config::{ + configs::{proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig}, + ContractsConfig, +}; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}; - -use crate::proof_data_handler::request_processor::RequestProcessor; +use zksync_types::{ + protocol_version::{L1VerifierConfig, VerifierParams}, + prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}, + H256, +}; mod request_processor; +fn fri_l1_verifier_config_from_env() -> L1VerifierConfig { + let config = ContractsConfig::from_env(); + L1VerifierConfig { + params: VerifierParams { + recursion_node_level_vk_hash: config.fri_recursion_node_level_vk_hash, + recursion_leaf_level_vk_hash: config.fri_recursion_leaf_level_vk_hash, + // The base layer commitment is not used in the FRI prover verification. 
+            recursion_circuits_set_vks_hash: H256::zero(),
+        },
+        recursion_scheduler_level_vk_hash: config.fri_recursion_scheduler_level_vk_hash,
+    }
+}
+
 pub(crate) async fn run_server(
     config: ProofDataHandlerConfig,
     blob_store: Box<dyn ObjectStore>,
     pool: ConnectionPool,
     mut stop_receiver: watch::Receiver<bool>,
-) {
+) -> anyhow::Result<()> {
     let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port));
-    vlog::debug!("Starting proof data handler server on {bind_address}");
-
+    tracing::debug!("Starting proof data handler server on {bind_address}");
+    let l1_verifier_config: Option<L1VerifierConfig> = match config.protocol_version_loading_mode {
+        ProtocolVersionLoadingMode::FromDb => None,
+        ProtocolVersionLoadingMode::FromEnvVar => Some(fri_l1_verifier_config_from_env()),
+    };
     let get_proof_gen_processor =
-        RequestProcessor::new(blob_store, pool, config.proof_generation_timeout());
+        RequestProcessor::new(blob_store, pool, config, l1_verifier_config);
     let submit_proof_processor = get_proof_gen_processor.clone();
     let app = Router::new()
         .route(
@@ -52,11 +74,12 @@ pub(crate) async fn run_server(
         .serve(app.into_make_service())
         .with_graceful_shutdown(async move {
             if stop_receiver.changed().await.is_err() {
-                vlog::warn!("Stop signal sender for proof data handler server was dropped without sending a signal");
+                tracing::warn!("Stop signal sender for proof data handler server was dropped without sending a signal");
             }
-            vlog::info!("Stop signal received, proof data handler server is shutting down");
+            tracing::info!("Stop signal received, proof data handler server is shutting down");
         })
         .await
-        .expect("Proof data handler server failed");
-    vlog::info!("Proof data handler server shut down");
+        .context("Proof data handler server failed")?;
+    tracing::info!("Proof data handler server shut down");
+    Ok(())
 }
diff --git a/core/bin/zksync_core/src/proof_data_handler/request_processor.rs b/core/bin/zksync_core/src/proof_data_handler/request_processor.rs
index a868dd6106b4..826317963763 100644
--- a/core/bin/zksync_core/src/proof_data_handler/request_processor.rs
+++ b/core/bin/zksync_core/src/proof_data_handler/request_processor.rs
@@ -1,23 +1,30 @@
-use std::sync::Arc;
-use std::time::Duration;
-
 use axum::extract::Path;
 use axum::response::Response;
 use axum::{http::StatusCode, response::IntoResponse, Json};
+use std::convert::TryFrom;
+use std::sync::Arc;
+use zksync_config::configs::{
+    proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig,
+};
 use zksync_dal::{ConnectionPool, SqlxError};
 use zksync_object_store::{ObjectStore, ObjectStoreError};
-use zksync_types::prover_server_api::{
-    ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
-    SubmitProofRequest, SubmitProofResponse,
+use zksync_types::protocol_version::FriProtocolVersionId;
+use zksync_types::{
+    protocol_version::L1VerifierConfig,
+    prover_server_api::{
+        ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse,
+        SubmitProofRequest, SubmitProofResponse,
+    },
+    L1BatchNumber,
 };
-use zksync_types::L1BatchNumber;
 
 #[derive(Clone)]
 pub(crate) struct RequestProcessor {
     blob_store: Arc<dyn ObjectStore>,
     pool: ConnectionPool,
-    proof_generation_timeout: Duration,
+    config: ProofDataHandlerConfig,
+    l1_verifier_config: Option<L1VerifierConfig>,
 }
 
 pub(crate) enum RequestProcessorError {
@@ -34,14 +41,14 @@ impl IntoResponse for RequestProcessorError {
                 "No pending batches to process".to_owned(),
             ),
             RequestProcessorError::ObjectStore(err) => {
-                vlog::error!("GCS error: {:?}", err);
+                tracing::error!("GCS error: {:?}", err);
                 (
StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } RequestProcessorError::Sqlx(err) => { - vlog::error!("Sqlx error: {:?}", err); + tracing::error!("Sqlx error: {:?}", err); match err { SqlxError::RowNotFound => { (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) @@ -61,12 +68,14 @@ impl RequestProcessor { pub(crate) fn new( blob_store: Box, pool: ConnectionPool, - proof_generation_timeout: Duration, + config: ProofDataHandlerConfig, + l1_verifier_config: Option, ) -> Self { Self { blob_store: Arc::from(blob_store), pool, - proof_generation_timeout, + config, + l1_verifier_config, } } @@ -74,14 +83,14 @@ impl RequestProcessor { &self, request: Json, ) -> Result, RequestProcessorError> { - vlog::info!("Received request for proof generation data: {:?}", request); + tracing::info!("Received request for proof generation data: {:?}", request); let l1_batch_number = self .pool .access_storage() .await .proof_generation_dal() - .get_next_block_to_be_proven(self.proof_generation_timeout) + .get_next_block_to_be_proven(self.config.proof_generation_timeout()) .await .ok_or(RequestProcessorError::NoPendingBatches)?; @@ -91,9 +100,25 @@ impl RequestProcessor { .await .map_err(RequestProcessorError::ObjectStore)?; + let fri_protocol_version_id = + FriProtocolVersionId::try_from(self.config.fri_protocol_version_id) + .expect("Invalid FRI protocol version id"); + + let l1_verifier_config= match self.config.protocol_version_loading_mode { + ProtocolVersionLoadingMode::FromDb => { + panic!("Loading protocol version from db is not implemented yet") + } + ProtocolVersionLoadingMode::FromEnvVar => { + self.l1_verifier_config + .expect("l1_verifier_config must be set while running ProtocolVersionLoadingMode::FromEnvVar mode") + } + }; + let proof_gen_data = ProofGenerationData { l1_batch_number, data: blob, + fri_protocol_version_id, + l1_verifier_config, }; Ok(Json(ProofGenerationDataResponse::Success(proof_gen_data))) @@ -104,21 +129,33 @@ impl RequestProcessor { Path(l1_batch_number): Path, Json(payload): Json, ) -> Result, RequestProcessorError> { - vlog::info!("Received proof for block number: {:?}", l1_batch_number); + tracing::info!("Received proof for block number: {:?}", l1_batch_number); let l1_batch_number = L1BatchNumber(l1_batch_number); + match payload { + SubmitProofRequest::Proof(proof) => { + let blob_url = self + .blob_store + .put(l1_batch_number, &*proof) + .await + .map_err(RequestProcessorError::ObjectStore)?; - let blob_url = self - .blob_store - .put(l1_batch_number, &payload.proof) - .await - .map_err(RequestProcessorError::ObjectStore)?; - - let mut storage = self.pool.access_storage().await; - storage - .proof_generation_dal() - .save_proof_artifacts_metadata(l1_batch_number, &blob_url) - .await - .map_err(RequestProcessorError::Sqlx)?; + let mut storage = self.pool.access_storage().await; + storage + .proof_generation_dal() + .save_proof_artifacts_metadata(l1_batch_number, &blob_url) + .await + .map_err(RequestProcessorError::Sqlx)?; + } + SubmitProofRequest::SkippedProofGeneration => { + self.pool + .access_storage() + .await + .proof_generation_dal() + .mark_proof_generation_job_as_skipped(l1_batch_number) + .await + .map_err(RequestProcessorError::Sqlx)?; + } + } Ok(Json(SubmitProofResponse::Success)) } diff --git a/core/bin/zksync_core/src/reorg_detector/mod.rs b/core/bin/zksync_core/src/reorg_detector/mod.rs index 78d5a842efc9..3695ef3553f3 100644 --- a/core/bin/zksync_core/src/reorg_detector/mod.rs +++ 
b/core/bin/zksync_core/src/reorg_detector/mod.rs @@ -71,6 +71,8 @@ impl ReorgDetector { /// Localizes a reorg: performs binary search to determine the last non-diverged block. async fn detect_reorg(&self, diverged_l1_batch: L1BatchNumber) -> RpcResult { + // TODO (BFT-176, BFT-181): We have to look through the whole history, since batch status updater may mark + // a block as executed even if the state diverges for it. binary_search_with(1, diverged_l1_batch.0, |number| { self.root_hashes_match(L1BatchNumber(number)) }) @@ -83,8 +85,8 @@ impl ReorgDetector { match self.run_inner().await { Ok(l1_batch_number) => return l1_batch_number, Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { - vlog::warn!("Following transport error occurred: {err}"); - vlog::info!("Trying again after a delay"); + tracing::warn!("Following transport error occurred: {err}"); + tracing::info!("Trying again after a delay"); tokio::time::sleep(SLEEP_INTERVAL).await; } Err(err) => { @@ -139,7 +141,7 @@ impl ReorgDetector { .is_legally_ahead_of_main_node(sealed_l1_batch_number) .await? { - vlog::trace!( + tracing::trace!( "Local state was updated ahead of the main node. Waiting for the main node to seal the batch" ); tokio::time::sleep(SLEEP_INTERVAL).await; @@ -147,7 +149,7 @@ impl ReorgDetector { } // At this point we're certain that if we detect a reorg, it's real. - vlog::trace!("Checking for reorgs - L1 batch #{sealed_l1_batch_number}"); + tracing::trace!("Checking for reorgs - L1 batch #{sealed_l1_batch_number}"); if self.root_hashes_match(sealed_l1_batch_number).await? { metrics::gauge!( "external_node.last_correct_batch", @@ -156,13 +158,15 @@ impl ReorgDetector { ); tokio::time::sleep(SLEEP_INTERVAL).await; } else { - vlog::warn!( + tracing::warn!( "Reorg detected: last state hash doesn't match the state hash from main node \ (L1 batch #{sealed_l1_batch_number})" ); - vlog::info!("Searching for the first diverged batch"); + tracing::info!("Searching for the first diverged batch"); let last_correct_l1_batch = self.detect_reorg(sealed_l1_batch_number).await?; - vlog::info!("Reorg localized: last correct L1 batch is #{last_correct_l1_batch}"); + tracing::info!( + "Reorg localized: last correct L1 batch is #{last_correct_l1_batch}" + ); return Ok(last_correct_l1_batch); } } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs index fce87a613338..7d768f7cde20 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -1,30 +1,31 @@ +use std::fmt; +use std::sync::Arc; +use std::time::Instant; + use async_trait::async_trait; +use once_cell::sync::OnceCell; use tokio::{ sync::{mpsc, oneshot}, task::JoinHandle, }; -use std::{collections::HashSet, fmt, time::Instant}; - -use multivm::{ - init_vm, init_vm_with_gas_limit, BlockProperties, OracleTools, VmInstance, VmVersion, -}; +use multivm::{MultivmTracer, VmInstance, VmInstanceData}; use vm::{ - vm::{VmPartialExecutionResult, VmTxExecutionResult}, - vm_with_bootloader::{BootloaderJobType, TxExecutionMode}, - TxRevertReason, VmBlockResult, + CallTracer, ExecutionResult, FinishedL1Batch, Halt, HistoryEnabled, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionResultAndLogs, }; use zksync_dal::ConnectionPool; -use zksync_state::{RocksdbStorage, StorageView}; -use zksync_types::{tx::ExecutionMetrics, L1BatchNumber, Transaction, U256}; -use zksync_utils::bytecode::{hash_bytecode, 
CompressedBytecodeInfo}; +use zksync_state::{ReadStorage, RocksdbStorage, StorageView}; +use zksync_types::{vm_trace::Call, witness_block_state::WitnessBlockState, Transaction, U256}; + +use zksync_utils::bytecode::CompressedBytecodeInfo; #[cfg(test)] mod tests; use crate::{ gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}, - state_keeper::{io::L1BatchParams, types::ExecutionMetricsForCriteria}, + state_keeper::types::ExecutionMetricsForCriteria, }; /// Representation of a transaction executed in the virtual machine. @@ -32,14 +33,15 @@ use crate::{ pub(crate) enum TxExecutionResult { /// Successful execution of the tx and the block tip dry run. Success { - tx_result: Box, + tx_result: Box, tx_metrics: ExecutionMetricsForCriteria, bootloader_dry_run_metrics: ExecutionMetricsForCriteria, - bootloader_dry_run_result: Box, + bootloader_dry_run_result: Box, compressed_bytecodes: Vec, + call_tracer_result: Vec, }, /// The VM rejected the tx for some reason. - RejectedByVm { rejection_reason: TxRevertReason }, + RejectedByVm { reason: Halt }, /// Bootloader gas limit is not enough to execute the tx. BootloaderOutOfGasForTx, /// Bootloader gas limit is enough to run the tx but not enough to execute block tip. @@ -48,82 +50,29 @@ pub(crate) enum TxExecutionResult { impl TxExecutionResult { /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. - pub(super) fn err(&self) -> Option<&TxRevertReason> { + pub(super) fn err(&self) -> Option<&Halt> { match self { Self::Success { .. } => None, - Self::RejectedByVm { rejection_reason } => Some(rejection_reason), + Self::RejectedByVm { + reason: rejection_reason, + } => Some(rejection_reason), Self::BootloaderOutOfGasForTx | Self::BootloaderOutOfGasForBlockTip { .. } => { - Some(&TxRevertReason::BootloaderOutOfGas) + Some(&Halt::BootloaderOutOfGas) } } } } -/// Configuration for the MultiVM. -/// Currently, represents an ordered sequence of (min_batch_number, vm_version) entries, -/// which will be scanned by the MultiVM on each batch. -#[derive(Debug, Clone)] -pub struct MultiVMConfig { - versions: Vec<(L1BatchNumber, VmVersion)>, -} - -impl MultiVMConfig { - /// Creates a new MultiVM config from the provided sequence of (min_batch_number, vm_version) entries. - /// - /// ## Panics - /// - /// Panics if the provided sequence is not ordered by the batch number, if it's empty or if the first entry - /// doesn't correspond to the batch #1. - pub fn new(versions: Vec<(L1BatchNumber, VmVersion)>) -> Self { - // Must-haves: config is not empty, we start from the first batch, config is ordered. - assert!(!versions.is_empty()); - assert_eq!(versions[0].0 .0, 1); - assert!(versions.windows(2).all(|w| w[0].0 < w[1].0)); - - Self { versions } - } - - /// Finds the appropriate VM version for the provided batch number. - pub fn version_for(&self, batch_number: L1BatchNumber) -> VmVersion { - debug_assert!( - batch_number != L1BatchNumber(0), - "Genesis block doesn't need to be actually executed" - ); - // Find the latest version which is not greater than the provided batch number. - let (_, version) = *self - .versions - .iter() - .rev() - .find(|(version_start, _)| batch_number >= *version_start) - .expect("At least one version must match"); - version - } - - /// Returns the config for mainnet. - /// This method is WIP, and returned config is not guaranteed to be full or correct. 
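The removed `version_for` above picked a VM version by scanning the ordered `(first_batch, version)` list from the end and taking the latest entry whose start does not exceed the requested batch; the diff drops this in favor of protocol versions tracked per batch in storage. A self-contained sketch of the deleted lookup, with a simplified version enum:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum VmVersion {
    M5,
    M6,
    Vm1_3_2,
}

/// Latest version whose starting batch is <= `batch`; the list must be ordered.
fn version_for(versions: &[(u32, VmVersion)], batch: u32) -> VmVersion {
    versions
        .iter()
        .rev()
        .find(|(start, _)| batch >= *start)
        .map(|(_, version)| *version)
        .expect("at least one version must match")
}

fn main() {
    let versions = [(1, VmVersion::M5), (292, VmVersion::M6), (49508, VmVersion::Vm1_3_2)];
    assert_eq!(version_for(&versions, 300), VmVersion::M6);
    assert_eq!(version_for(&versions, 60_000), VmVersion::Vm1_3_2);
}
```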
- pub fn mainnet_config_wip() -> Self { - Self::new(vec![ - (L1BatchNumber(1), VmVersion::M5WithoutRefunds), - (L1BatchNumber(292), VmVersion::M5WithRefunds), - (L1BatchNumber(360), VmVersion::M6Initial), - (L1BatchNumber(390), VmVersion::M6BugWithCompressionFixed), - (L1BatchNumber(49508), VmVersion::Vm1_3_2), - ]) - } - - /// Returns the config for testnet. - /// This method is WIP, and returned config is not guaranteed to be full or correct. - pub fn testnet_config_wip() -> Self { - Self::new(vec![(L1BatchNumber(1), VmVersion::M5WithoutRefunds)]) - } -} - /// An abstraction that allows us to create different kinds of batch executors. /// The only requirement is to return a [`BatchExecutorHandle`], which does its work /// by communicating with the externally initialized thread. #[async_trait] pub trait L1BatchExecutorBuilder: 'static + Send + Sync + fmt::Debug { - async fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle; + async fn init_batch( + &self, + l1_batch_params: L1BatchEnv, + system_env: SystemEnv, + ) -> BatchExecutorHandle; } /// The default implementation of [`L1BatchExecutorBuilder`]. @@ -134,8 +83,7 @@ pub struct MainBatchExecutorBuilder { pool: ConnectionPool, save_call_traces: bool, max_allowed_tx_gas_limit: U256, - validation_computational_gas_limit: u32, - multivm_config: Option, + upload_witness_inputs_to_gcs: bool, } impl MainBatchExecutorBuilder { @@ -144,40 +92,32 @@ impl MainBatchExecutorBuilder { pool: ConnectionPool, max_allowed_tx_gas_limit: U256, save_call_traces: bool, - validation_computational_gas_limit: u32, - multivm_config: Option, + upload_witness_inputs_to_gcs: bool, ) -> Self { Self { state_keeper_db_path, pool, save_call_traces, max_allowed_tx_gas_limit, - validation_computational_gas_limit, - multivm_config, + upload_witness_inputs_to_gcs, } } } #[async_trait] impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { - async fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle { + async fn init_batch( + &self, + l1_batch_params: L1BatchEnv, + system_env: SystemEnv, + ) -> BatchExecutorHandle { let mut secondary_storage = RocksdbStorage::new(self.state_keeper_db_path.as_ref()); let mut conn = self.pool.access_storage_tagged("state_keeper").await; secondary_storage.update_from_postgres(&mut conn).await; drop(conn); - let batch_number = l1_batch_params - .context_mode - .inner_block_context() - .context - .block_number; - let vm_version = self - .multivm_config - .as_ref() - .map(|config| config.version_for(L1BatchNumber(batch_number))) - .unwrap_or(VmVersion::latest()); - - vlog::info!( + let batch_number = l1_batch_params.number; + tracing::info!( "Secondary storage for batch {batch_number} initialized, size is {}", secondary_storage.estimated_map_size() ); @@ -186,13 +126,12 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder { secondary_storage.estimated_map_size() as f64, ); BatchExecutorHandle::new( - vm_version, self.save_call_traces, self.max_allowed_tx_gas_limit, - self.validation_computational_gas_limit, secondary_storage, l1_batch_params, - None, + system_env, + self.upload_witness_inputs_to_gcs, ) } } @@ -207,29 +146,33 @@ pub struct BatchExecutorHandle { } impl BatchExecutorHandle { + // TODO: to be removed once testing in stage2 is done + #[allow(clippy::too_many_arguments)] pub(super) fn new( - vm_version: VmVersion, save_call_traces: bool, max_allowed_tx_gas_limit: U256, - validation_computational_gas_limit: u32, secondary_storage: RocksdbStorage, - l1_batch_params: L1BatchParams, 
-        vm_gas_limit: Option<u32>,
+        l1_batch_env: L1BatchEnv,
+        system_env: SystemEnv,
+        upload_witness_inputs_to_gcs: bool,
     ) -> Self {
         // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued
         // until a previous command is processed), capacity 1 is enough for the commands channel.
         let (commands_sender, commands_receiver) = mpsc::channel(1);
         let executor = BatchExecutor {
-            vm_version,
             save_call_traces,
             max_allowed_tx_gas_limit,
-            validation_computational_gas_limit,
             commands: commands_receiver,
-            vm_gas_limit,
         };
-        let handle =
-            tokio::task::spawn_blocking(move || executor.run(secondary_storage, l1_batch_params));
+        let handle = tokio::task::spawn_blocking(move || {
+            executor.run(
+                secondary_storage,
+                l1_batch_env,
+                system_env,
+                upload_witness_inputs_to_gcs,
+            )
+        });
         Self {
             handle,
             commands: commands_sender,
@@ -277,6 +220,19 @@ impl BatchExecutorHandle {
         res
     }
 
+    pub(super) async fn start_next_miniblock(&self, miniblock_info: L2BlockEnv) {
+        // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation
+        // indeed has been processed.
+        let (response_sender, response_receiver) = oneshot::channel();
+        self.commands
+            .send(Command::StartNextMiniblock(miniblock_info, response_sender))
+            .await
+            .unwrap();
+        let start = Instant::now();
+        response_receiver.await.unwrap();
+        metrics::histogram!("state_keeper.batch_executor.command_response_time", start.elapsed(), "command" => "start_next_miniblock");
+    }
+
     pub(super) async fn rollback_last_tx(&self) {
         // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation
         // indeed has been processed.
@@ -290,7 +246,7 @@ impl BatchExecutorHandle {
         metrics::histogram!("state_keeper.batch_executor.command_response_time", start.elapsed(), "command" => "rollback_last_tx");
     }
 
-    pub(super) async fn finish_batch(self) -> VmBlockResult {
+    pub(super) async fn finish_batch(self) -> (FinishedL1Batch, Option<WitnessBlockState>) {
         let (response_sender, response_receiver) = oneshot::channel();
         self.commands
             .send(Command::FinishBatch(response_sender))
@@ -307,8 +263,9 @@ impl BatchExecutorHandle {
 #[derive(Debug)]
 pub(super) enum Command {
     ExecuteTx(Box<Transaction>, oneshot::Sender<TxExecutionResult>),
+    StartNextMiniblock(L2BlockEnv, oneshot::Sender<()>),
     RollbackLastTx(oneshot::Sender<()>),
-    FinishBatch(oneshot::Sender<VmBlockResult>),
+    FinishBatch(oneshot::Sender<(FinishedL1Batch, Option<WitnessBlockState>)>),
 }
 
 /// Implementation of the "primary" (non-test) batch executor.
@@ -319,50 +276,26 @@ pub(super) enum Command {
 /// be constructed.
#[derive(Debug)] pub(super) struct BatchExecutor { - vm_version: VmVersion, save_call_traces: bool, max_allowed_tx_gas_limit: U256, - validation_computational_gas_limit: u32, commands: mpsc::Receiver, - vm_gas_limit: Option, } impl BatchExecutor { - pub(super) fn run(mut self, secondary_storage: RocksdbStorage, l1_batch_params: L1BatchParams) { - vlog::info!( - "Starting executing batch #{}", - l1_batch_params - .context_mode - .inner_block_context() - .context - .block_number - ); + pub(super) fn run( + mut self, + secondary_storage: RocksdbStorage, + l1_batch_params: L1BatchEnv, + system_env: SystemEnv, + upload_witness_inputs_to_gcs: bool, + ) { + tracing::info!("Starting executing batch #{:?}", &l1_batch_params.number); - let mut storage_view = StorageView::new(&secondary_storage); - let mut oracle_tools = OracleTools::new(self.vm_version, &mut storage_view); - let block_properties = BlockProperties::new( - self.vm_version, - l1_batch_params.properties.default_aa_code_hash, - ); - let mut vm = match self.vm_gas_limit { - Some(vm_gas_limit) => init_vm_with_gas_limit( - self.vm_version, - &mut oracle_tools, - l1_batch_params.context_mode, - &block_properties, - TxExecutionMode::VerifyExecute, - &l1_batch_params.base_system_contracts, - vm_gas_limit, - ), - None => init_vm( - self.vm_version, - &mut oracle_tools, - l1_batch_params.context_mode, - &block_properties, - TxExecutionMode::VerifyExecute, - &l1_batch_params.base_system_contracts, - ), - }; + let storage_view = StorageView::new(secondary_storage).to_rc_ptr(); + + let mut instance_data = + VmInstanceData::new(storage_view.clone(), &system_env, HistoryEnabled); + let mut vm = VmInstance::new(l1_batch_params, system_env, &mut instance_data); while let Some(cmd) = self.commands.blocking_recv() { match cmd { @@ -374,12 +307,22 @@ impl BatchExecutor { self.rollback_last_tx(&mut vm); resp.send(()).unwrap(); } + Command::StartNextMiniblock(l2_block_env, resp) => { + self.start_next_miniblock(l2_block_env, &mut vm); + resp.send(()).unwrap(); + } Command::FinishBatch(resp) => { - resp.send(self.finish_batch(&mut vm)).unwrap(); + let vm_block_result = self.finish_batch(&mut vm); + let witness_block_state = if upload_witness_inputs_to_gcs { + Some(storage_view.borrow_mut().witness_block_state()) + } else { + None + }; + resp.send((vm_block_result, witness_block_state)).unwrap(); // storage_view cannot be accessed while borrowed by the VM, // so this is the only point at which storage metrics can be obtained - let metrics = storage_view.metrics(); + let metrics = storage_view.as_ref().borrow_mut().metrics(); metrics::histogram!( "state_keeper.batch_storage_interaction_duration", metrics.time_spent_on_get_value, @@ -396,32 +339,34 @@ impl BatchExecutor { } } // State keeper can exit because of stop signal, so it's OK to exit mid-batch. - vlog::info!("State keeper exited with an unfinished batch"); + tracing::info!("State keeper exited with an unfinished batch"); } - fn execute_tx(&self, tx: &Transaction, vm: &mut VmInstance<'_>) -> TxExecutionResult { - let gas_consumed_before_tx = vm.gas_consumed(); - + fn execute_tx( + &self, + tx: &Transaction, + vm: &mut VmInstance<'_, S, HistoryEnabled>, + ) -> TxExecutionResult { // Save pre-`execute_next_tx` VM snapshot. - vm.save_current_vm_as_snapshot(); + vm.make_snapshot(); // Reject transactions with too big gas limit. // They are also rejected on the API level, but // we need to secure ourselves in case some tx will somehow get into mempool. 
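Stepping back to the handle above: `BatchExecutorHandle` drives the executor thread purely through messages. Each `Command` carries a `oneshot` responder, and the loop drains the channel with `blocking_recv` from inside `spawn_blocking`, so async callers never block the runtime. A minimal sketch of the same pattern with a toy command type (plain `tokio`):

```rust
use tokio::sync::{mpsc, oneshot};

enum Command {
    Ping(oneshot::Sender<&'static str>),
}

#[tokio::main]
async fn main() {
    // Capacity 1 suffices: commands are processed strictly one at a time.
    let (commands, mut rx) = mpsc::channel::<Command>(1);
    let worker = tokio::task::spawn_blocking(move || {
        // Blocking receive is fine here: we are off the async runtime.
        while let Some(Command::Ping(resp)) = rx.blocking_recv() {
            resp.send("pong").unwrap();
        }
    });

    let (resp_tx, resp_rx) = oneshot::channel();
    commands.send(Command::Ping(resp_tx)).await.unwrap();
    assert_eq!(resp_rx.await.unwrap(), "pong");

    drop(commands); // close the channel so the worker loop exits
    worker.await.unwrap();
}
```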
if tx.gas_limit() > self.max_allowed_tx_gas_limit { - vlog::warn!( + tracing::warn!( "Found tx with too big gas limit in state keeper, hash: {:?}, gas_limit: {}", tx.hash(), tx.gas_limit() ); return TxExecutionResult::RejectedByVm { - rejection_reason: TxRevertReason::TooBigGasLimit, + reason: Halt::TooBigGasLimit, }; } // Execute the transaction. let stage_started_at = Instant::now(); - let tx_result = self.execute_tx_in_vm(tx, vm); + let (tx_result, compressed_bytecodes, call_tracer_result) = self.execute_tx_in_vm(tx, vm); metrics::histogram!( "server.state_keeper.tx_execution_time", stage_started_at.elapsed(), @@ -437,37 +382,42 @@ impl BatchExecutor { "stage" => "state_keeper" ); - let (exec_result, compressed_bytecodes) = match tx_result { - Err(TxRevertReason::BootloaderOutOfGas) => { - return TxExecutionResult::BootloaderOutOfGasForTx - } - Err(rejection_reason) => return TxExecutionResult::RejectedByVm { rejection_reason }, - Ok((exec_result, compressed_bytecodes)) => (exec_result, compressed_bytecodes), - }; + if let ExecutionResult::Halt { reason } = tx_result.result { + return match reason { + Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForTx, + _ => TxExecutionResult::RejectedByVm { reason }, + }; + } - let tx_metrics = - Self::get_execution_metrics(vm, Some(tx), &exec_result.result, gas_consumed_before_tx); - - match self.dryrun_block_tip(vm) { - Ok((bootloader_dry_run_result, bootloader_dry_run_metrics)) => { - TxExecutionResult::Success { - tx_result: Box::new(exec_result), - tx_metrics, - bootloader_dry_run_metrics, - bootloader_dry_run_result: Box::new(bootloader_dry_run_result), - compressed_bytecodes, - } - } - Err(err) => { - vlog::warn!("VM reverted while executing block tip: {}", err); - TxExecutionResult::BootloaderOutOfGasForBlockTip + let tx_metrics = Self::get_execution_metrics(Some(tx), &tx_result); + + let (bootloader_dry_run_result, bootloader_dry_run_metrics) = self.dryrun_block_tip(vm); + match &bootloader_dry_run_result.result { + ExecutionResult::Success { .. } => TxExecutionResult::Success { + tx_result: Box::new(tx_result), + tx_metrics, + bootloader_dry_run_metrics, + bootloader_dry_run_result: Box::new(bootloader_dry_run_result), + compressed_bytecodes, + call_tracer_result, + }, + ExecutionResult::Revert { .. } => { + unreachable!( + "VM must not revert when finalizing block (except `BootloaderOutOfGas`)" + ); } + ExecutionResult::Halt { reason } => match reason { + Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForBlockTip, + _ => { + panic!("VM must not revert when finalizing block (except `BootloaderOutOfGas`)") + } + }, } } - fn rollback_last_tx(&self, vm: &mut VmInstance<'_>) { + fn rollback_last_tx(&self, vm: &mut VmInstance<'_, S, HistoryEnabled>) { let stage_started_at = Instant::now(); - vm.rollback_to_snapshot_popping(); + vm.rollback_to_the_latest_snapshot(); metrics::histogram!( "server.state_keeper.tx_execution_time", stage_started_at.elapsed(), @@ -475,19 +425,40 @@ impl BatchExecutor { ); } - fn finish_batch(&self, vm: &mut VmInstance<'_>) -> VmBlockResult { - vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing) + fn start_next_miniblock( + &self, + l2_block_env: L2BlockEnv, + vm: &mut VmInstance<'_, S, HistoryEnabled>, + ) { + vm.start_new_l2_block(l2_block_env); + } + + fn finish_batch( + &self, + vm: &mut VmInstance<'_, S, HistoryEnabled>, + ) -> FinishedL1Batch { + // The vm execution was paused right after the last transaction was executed. 
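The halt handling above funnels every non-success into one of two buckets: `BootloaderOutOfGas` gets its own variant, while every other halt is a plain VM rejection. A stand-alone sketch of that classification with stand-in enums:

```rust
#[derive(Debug)]
enum Halt {
    BootloaderOutOfGas,
    TooBigGasLimit,
}

#[derive(Debug)]
enum TxExecutionResult {
    BootloaderOutOfGasForTx,
    RejectedByVm { reason: Halt },
}

fn classify(reason: Halt) -> TxExecutionResult {
    match reason {
        // Bootloader out-of-gas is surfaced as its own variant.
        Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForTx,
        // Any other halt means the transaction itself is rejected.
        other => TxExecutionResult::RejectedByVm { reason: other },
    }
}

fn main() {
    println!("{:?}", classify(Halt::TooBigGasLimit));
    println!("{:?}", classify(Halt::BootloaderOutOfGas));
}
```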
+ // There is some post-processing work that the VM needs to do before the block is fully processed. + let result = vm.finish_batch(); + if result.block_tip_execution_result.result.is_failed() { + panic!("VM must not fail when finalizing block"); + } + result } // Err when transaction is rejected. // Ok(TxExecutionStatus::Success) when the transaction succeeded // Ok(TxExecutionStatus::Failure) when the transaction failed. // Note that failed transactions are considered properly processed and are included in blocks - fn execute_tx_in_vm( + fn execute_tx_in_vm( &self, tx: &Transaction, - vm: &mut VmInstance<'_>, - ) -> Result<(VmTxExecutionResult, Vec), TxRevertReason> { + vm: &mut VmInstance<'_, S, HistoryEnabled>, + ) -> ( + VmExecutionResultAndLogs, + Vec, + Vec, + ) { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. // In order to not let the accounts spam the space of compressed bytecodes with bytecodes @@ -498,130 +469,106 @@ impl BatchExecutor { // and so we reexecute the transaction, but without compressions. // Saving the snapshot before executing - vm.save_current_vm_as_snapshot(); + vm.make_snapshot(); - let compressed_bytecodes = if tx.is_l1() { - // For L1 transactions there are no compressed bytecodes - vec![] + let call_tracer_result = Arc::new(OnceCell::default()); + let custom_tracers = if self.save_call_traces { + vec![CallTracer::new(call_tracer_result.clone(), HistoryEnabled).into_boxed()] } else { - // Deduplicate and filter factory deps preserving original order. - let deps = tx.execute.factory_deps.as_deref().unwrap_or_default(); - let mut deps_hashes = HashSet::with_capacity(deps.len()); - let filtered_deps = deps.iter().filter_map(|bytecode| { - let bytecode_hash = hash_bytecode(bytecode); - let is_known = - !deps_hashes.insert(bytecode_hash) || vm.is_bytecode_known(&bytecode_hash); - if is_known { - None - } else { - CompressedBytecodeInfo::from_original(bytecode.clone()).ok() - } - }); - filtered_deps.collect() - }; - - vm.push_transaction_to_bootloader_memory( - tx, - TxExecutionMode::VerifyExecute, - Some(compressed_bytecodes.clone()), - ); - let result_with_compression = vm.execute_next_tx( - self.validation_computational_gas_limit, - self.save_call_traces, - )?; - - let at_least_one_unpublished = { - compressed_bytecodes - .iter() - .any(|info| !vm.is_bytecode_known(&hash_bytecode(&info.original))) + vec![] }; + if let Ok(result) = + vm.inspect_transaction_with_bytecode_compression(custom_tracers, tx.clone(), true) + { + let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); + vm.pop_snapshot_no_rollback(); - if at_least_one_unpublished { - // Rolling back and trying to execute one more time. - vm.rollback_to_snapshot_popping(); - vm.push_transaction_to_bootloader_memory( - tx, - TxExecutionMode::VerifyExecute, - Some(vec![]), - ); + let trace = Arc::try_unwrap(call_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + return (result, compressed_bytecodes, trace); + } - vm.execute_next_tx( - self.validation_computational_gas_limit, - self.save_call_traces, - ) - .map(|val| (val, vec![])) + let call_tracer_result = Arc::new(OnceCell::default()); + let custom_tracers = if self.save_call_traces { + vec![CallTracer::new(call_tracer_result.clone(), HistoryEnabled).into_boxed()] } else { - // Remove the snapshot taken at the start of this function as it is not needed anymore. 
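The compression flow above is snapshot-and-retry: take a snapshot, attempt the transaction with compressed bytecodes, and if publication fails, roll back and re-execute without compression. A toy model of that control flow; `ToyVm` and its methods are stand-ins, not the crate's `VmInstance` API:

```rust
struct ToyVm {
    snapshots: Vec<u64>,
    state: u64,
}

impl ToyVm {
    fn make_snapshot(&mut self) {
        self.snapshots.push(self.state);
    }
    fn pop_snapshot_no_rollback(&mut self) {
        self.snapshots.pop();
    }
    fn rollback_to_the_latest_snapshot(&mut self) {
        self.state = self.snapshots.pop().expect("no snapshot");
    }
    // Pretend executing with compression fails (e.g. a bytecode stayed unpublished).
    fn execute(&mut self, with_compression: bool) -> Result<u64, ()> {
        self.state += 1;
        if with_compression { Err(()) } else { Ok(self.state) }
    }
}

fn main() {
    let mut vm = ToyVm { snapshots: vec![], state: 0 };
    vm.make_snapshot();
    let result = match vm.execute(true) {
        // Success: keep the state, drop the snapshot without rolling back.
        Ok(res) => { vm.pop_snapshot_no_rollback(); res }
        // Failure: roll back and re-execute without compression.
        Err(()) => {
            vm.rollback_to_the_latest_snapshot();
            vm.execute(false).expect("can't fail without compression")
        }
    };
    assert_eq!(result, 1); // the failed attempt left no trace
}
```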
- vm.pop_snapshot_no_rollback(); - Ok((result_with_compression, compressed_bytecodes)) - } + vec![] + }; + vm.rollback_to_the_latest_snapshot(); + let result = vm + .inspect_transaction_with_bytecode_compression(custom_tracers, tx.clone(), false) + .expect("Compression can't fail if we don't apply it"); + let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); + + // TODO implement tracer manager which will be responsible + // for collecting result from all tracers and save it to the database + let trace = Arc::try_unwrap(call_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + (result, compressed_bytecodes, trace) } - fn dryrun_block_tip( + fn dryrun_block_tip( &self, - vm: &mut VmInstance<'_>, - ) -> Result<(VmPartialExecutionResult, ExecutionMetricsForCriteria), TxRevertReason> { - let stage_started_at = Instant::now(); - let gas_consumed_before = vm.gas_consumed(); + vm: &mut VmInstance<'_, S, HistoryEnabled>, + ) -> (VmExecutionResultAndLogs, ExecutionMetricsForCriteria) { + let started_at = Instant::now(); + let mut stage_started_at = Instant::now(); // Save pre-`execute_till_block_end` VM snapshot. - vm.save_current_vm_as_snapshot(); + vm.make_snapshot(); + + metrics::histogram!( + "server.state_keeper.tx_execution_time", + stage_started_at.elapsed(), + "stage" => "dryrun_make_snapshot", + ); + stage_started_at = Instant::now(); + let block_tip_result = vm.execute_block_tip(); - let result = match &block_tip_result.revert_reason { - None => { - let metrics = - Self::get_execution_metrics(vm, None, &block_tip_result, gas_consumed_before); - Ok((block_tip_result, metrics)) - } - Some(TxRevertReason::BootloaderOutOfGas) => Err(TxRevertReason::BootloaderOutOfGas), - Some(other_reason) => { - panic!("VM must not revert when finalizing block (except `BootloaderOutOfGas`). Revert reason: {:?}", other_reason); - } - }; + + metrics::histogram!( + "server.state_keeper.tx_execution_time", + stage_started_at.elapsed(), + "stage" => "dryrun_execute_block_tip", + ); + stage_started_at = Instant::now(); + + let metrics = Self::get_execution_metrics(None, &block_tip_result); + + metrics::histogram!( + "server.state_keeper.tx_execution_time", + stage_started_at.elapsed(), + "stage" => "dryrun_get_execution_metrics", + ); + stage_started_at = Instant::now(); // Rollback to the pre-`execute_till_block_end` state. - vm.rollback_to_snapshot_popping(); + vm.rollback_to_the_latest_snapshot(); metrics::histogram!( "server.state_keeper.tx_execution_time", stage_started_at.elapsed(), - "stage" => "dryrun_block_tip" + "stage" => "dryrun_rollback_to_the_latest_snapshot" ); - result + metrics::histogram!( + "server.state_keeper.tx_execution_time", + started_at.elapsed(), + "stage" => "dryrun_rollback" + ); + + (block_tip_result, metrics) } fn get_execution_metrics( - vm: &VmInstance<'_>, tx: Option<&Transaction>, - execution_result: &VmPartialExecutionResult, - gas_consumed_before: u32, + execution_result: &VmExecutionResultAndLogs, ) -> ExecutionMetricsForCriteria { - let gas_consumed_after = vm.gas_consumed(); - assert!( - gas_consumed_after >= gas_consumed_before, - "Invalid consumed gas value, possible underflow. 
Tx: {:?}", - tx - ); - let gas_used = gas_consumed_after - gas_consumed_before; - let total_factory_deps = tx - .map(|tx| { - tx.execute - .factory_deps - .as_ref() - .map_or(0, |deps| deps.len() as u16) - }) - .unwrap_or(0); - - let execution_metrics = ExecutionMetrics::new( - &execution_result.logs, - gas_used as usize, - total_factory_deps, - execution_result.contracts_used, - execution_result.cycles_used, - execution_result.computational_gas_used, - ); - + let execution_metrics = execution_result.get_execution_metrics(tx); let l1_gas = match tx { Some(tx) => gas_count_from_tx_and_metrics(tx, &execution_metrics), None => gas_count_from_metrics(&execution_metrics), diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 6c4199d3e170..bae8b81fc134 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -2,13 +2,15 @@ use assert_matches::assert_matches; use db_test_macro::db_test; use zksync_dal::ConnectionPool; -use zksync_types::{tx::tx_execution_info::TxExecutionStatus, PriorityOpId}; +use zksync_types::PriorityOpId; mod tester; -use self::tester::{Account, Tester}; +use self::tester::Tester; use super::TxExecutionResult; -use crate::state_keeper::batch_executor::tests::tester::TestConfig; +use crate::state_keeper::batch_executor::tests::tester::{AccountLoadNextExecutable, TestConfig}; + +use zksync_test_account::Account; /// Ensures that the transaction was executed successfully. fn assert_executed(execution_result: &TxExecutionResult) { @@ -24,7 +26,7 @@ fn assert_rejected(execution_result: &TxExecutionResult) { fn assert_reverted(execution_result: &TxExecutionResult) { assert_executed(execution_result); if let TxExecutionResult::Success { tx_result, .. } = execution_result { - assert_matches!(tx_result.status, TxExecutionStatus::Failure); + assert!(tx_result.result.is_failed()); } else { unreachable!(); } @@ -36,6 +38,7 @@ async fn execute_l2_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; @@ -51,6 +54,7 @@ async fn execute_l1_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; @@ -85,6 +89,7 @@ async fn rollback(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; @@ -117,6 +122,7 @@ async fn rollback(connection_pool: ConnectionPool) { tx_metrics_old, tx_metrics_new, "Execution results must be the same" ); + executor.finish_batch().await; } @@ -126,13 +132,13 @@ async fn reject_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; let executor = tester.create_batch_executor().await; // Wallet is not funded, it can't pay for fees. let res = executor.execute_tx(alice.execute()).await; assert_rejected(&res); - executor.finish_batch().await; } /// Checks that tx with too big gas limit is correctly rejected. 
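The helpers above distinguish three outcomes: executed, executed-but-reverted (the VM ran the transaction, but the transaction itself failed), and rejected outright. A simplified sketch of that distinction, assuming the `assert_matches` crate and a cut-down result enum:

```rust
use assert_matches::assert_matches;

#[derive(Debug)]
enum TxExecutionResult {
    Success { failed: bool },
    RejectedByVm { reason: &'static str },
}

fn assert_rejected(res: &TxExecutionResult) {
    assert_matches!(res, TxExecutionResult::RejectedByVm { .. });
}

fn assert_reverted(res: &TxExecutionResult) {
    // A reverted tx is still a successful *execution* from the VM's viewpoint;
    // only the transaction's own result is marked as failed.
    assert_matches!(res, TxExecutionResult::Success { failed: true });
}

fn main() {
    assert_rejected(&TxExecutionResult::RejectedByVm { reason: "unfunded wallet" });
    assert_reverted(&TxExecutionResult::Success { failed: true });
}
```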
@@ -146,20 +152,22 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) { let executor = tester.create_batch_executor().await; let bad_tx = alice.execute_with_gas_limit(u32::MAX); + let res_old = executor.execute_tx(bad_tx.clone()).await; assert_rejected(&res_old); executor.rollback_last_tx().await; let res_new = executor.execute_tx(bad_tx).await; assert_rejected(&res_new); + executor.rollback_last_tx().await; let ( TxExecutionResult::RejectedByVm { - rejection_reason: rejection_reason_old, + reason: rejection_reason_old, .. }, TxExecutionResult::RejectedByVm { - rejection_reason: rejection_reason_new, + reason: rejection_reason_new, .. }, ) = (res_old, res_new) @@ -173,6 +181,7 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) { // Ensure that now we can execute a valid tx. alice.nonce -= 1; // Reset the nonce. + let res = executor.execute_tx(alice.execute()).await; assert_executed(&res); executor.finish_batch().await; @@ -195,7 +204,6 @@ async fn tx_cant_be_reexecuted(connection_pool: ConnectionPool) { // Nonce is used for the second tx. let res2 = executor.execute_tx(tx).await; assert_rejected(&res2); - executor.finish_batch().await; } /// Checks that we can deploy and call the loadnext contract. @@ -208,12 +216,18 @@ async fn deploy_and_call_loadtest(connection_pool: ConnectionPool) { tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; - let (deploy_tx, loadtest_address) = alice.deploy_loadnext_tx(); - assert_executed(&executor.execute_tx(deploy_tx).await); - let custom_gas_tx = alice.loadnext_custom_gas_call(loadtest_address, 10, 10_000_000); - assert_executed(&executor.execute_tx(custom_gas_tx).await); - let custom_writes_tx = alice.loadnext_custom_writes_call(loadtest_address, 1, 500_000_000); - assert_executed(&executor.execute_tx(custom_writes_tx).await); + let tx = alice.deploy_loadnext_tx(); + assert_executed(&executor.execute_tx(tx.tx).await); + assert_executed( + &executor + .execute_tx(alice.loadnext_custom_gas_call(tx.address, 10, 10_000_000)) + .await, + ); + assert_executed( + &executor + .execute_tx(alice.loadnext_custom_writes_call(tx.address, 1, 500_000_000)) + .await, + ); executor.finish_batch().await; } @@ -223,19 +237,22 @@ async fn execute_reverted_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; - let (deploy_tx, loadtest_address) = alice.deploy_loadnext_tx(); - assert_executed(&executor.execute_tx(deploy_tx).await); + let tx = alice.deploy_loadnext_tx(); + assert_executed(&executor.execute_tx(tx.tx).await); - let custom_writes_tx = alice.loadnext_custom_writes_call( - loadtest_address, - 1, - 1_000_000, // We provide enough gas for tx to be executed, but not enough for the call to be successful. + assert_reverted( + &executor + .execute_tx(alice.loadnext_custom_writes_call( + tx.address, 1, + 1_000_000, // We provide enough gas for tx to be executed, but not enough for the call to be successful. 
+ )) + .await, ); - assert_reverted(&executor.execute_tx(custom_writes_tx).await); executor.finish_batch().await; } @@ -247,6 +264,7 @@ async fn execute_realistic_scenario(connection_pool: ConnectionPool) { let mut bob = Account::random(); let tester = Tester::new(connection_pool); + tester.genesis().await; tester.fund(&[alice.address()]).await; tester.fund(&[bob.address()]).await; @@ -300,6 +318,7 @@ async fn bootloader_out_of_gas_for_any_tx(connection_pool: ConnectionPool) { vm_gas_limit: Some(10), max_allowed_tx_gas_limit: u32::MAX, validation_computational_gas_limit: u32::MAX, + upload_witness_inputs_to_gcs: false, }, ); @@ -309,8 +328,6 @@ async fn bootloader_out_of_gas_for_any_tx(connection_pool: ConnectionPool) { let res = executor.execute_tx(alice.execute()).await; assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); - - executor.finish_batch().await; } /// Checks that we can handle the bootloader out of gas error on tip phase. @@ -328,21 +345,20 @@ async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { let res = executor.execute_tx(alice.execute()).await; assert_executed(&res); - let vm_block_res = executor.finish_batch().await; + let (vm_block_res, _witness_block_state) = executor.finish_batch().await; // Just a bit below the gas used for the previous batch execution should be fine to execute the tx // but not enough to execute the block tip. tester.set_config(TestConfig { save_call_traces: false, - vm_gas_limit: Some(vm_block_res.full_result.gas_used - 10), + vm_gas_limit: Some(vm_block_res.block_tip_execution_result.statistics.gas_used - 10), max_allowed_tx_gas_limit: u32::MAX, validation_computational_gas_limit: u32::MAX, + upload_witness_inputs_to_gcs: false, }); let second_executor = tester.create_batch_executor().await; let res = second_executor.execute_tx(alice.execute()).await; - assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForBlockTip); - - second_executor.finish_batch().await; + assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index c9af99c71bbe..9afca00f463a 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,43 +1,30 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. 
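The harness below keeps each test's state keeper cache in its own scratch directory, so parallel tests never observe each other's RocksDB state. A minimal sketch of that isolation, assuming only the `tempfile` crate:

```rust
use tempfile::TempDir;

fn main() -> std::io::Result<()> {
    // The directory (and everything under it) is removed when `db_dir` drops.
    let db_dir = TempDir::new()?;
    let db_path = db_dir.path().join("state_keeper");
    std::fs::create_dir_all(&db_path)?;
    println!("per-test database lives at {}", db_path.display());
    Ok(())
}
```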
-use multivm::VmVersion; use tempfile::TempDir; use vm::{ - test_utils::{ - get_create_zksync_address, get_deploy_tx, mock_loadnext_gas_burn_call, - mock_loadnext_test_call, - }, - vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - zk_evm::{ - block_properties::BlockProperties, - zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, - }, + constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, + {L1BatchEnv, SystemEnv}, }; -use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{get_loadnext_contract, TestContract}; +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_dal::ConnectionPool; use zksync_state::RocksdbStorage; +use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ - ethabi::{encode, Token}, - fee::Fee, - l1::{L1Tx, OpProcessingType, PriorityQueueType}, - l2::L2Tx, - system_contracts::get_system_smart_contracts, - utils::storage_key_for_standard_token_balance, - AccountTreeId, Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, MiniblockNumber, - Nonce, PackedEthSignature, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, + ethabi::Token, fee::Fee, system_contracts::get_system_smart_contracts, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, + L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, L2_ETH_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; -use zksync_utils::{test_utils::LoadnextContractExecutionParams, u256_to_h256}; +use zksync_utils::u256_to_h256; use crate::genesis::create_genesis_l1_batch; use crate::state_keeper::{ batch_executor::BatchExecutorHandle, - io::L1BatchParams, - tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, + tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, }; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; @@ -51,6 +38,7 @@ pub(super) struct TestConfig { pub(super) vm_gas_limit: Option, pub(super) max_allowed_tx_gas_limit: u32, pub(super) validation_computational_gas_limit: u32, + pub(super) upload_witness_inputs_to_gcs: bool, } impl TestConfig { @@ -63,6 +51,7 @@ impl TestConfig { save_call_traces: false, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit, validation_computational_gas_limit: config.validation_computational_gas_limit, + upload_witness_inputs_to_gcs: false, } } } @@ -99,7 +88,11 @@ impl Tester { /// This function intentionally uses sensible defaults to not introduce boilerplate. pub(super) async fn create_batch_executor(&self) -> BatchExecutorHandle { // Not really important for the batch executor - it operates over a single batch. - let (block_context, block_properties) = self.batch_params(L1BatchNumber(1), 100); + let (l1_batch, system_env) = self.batch_params( + L1BatchNumber(1), + 100, + self.config.validation_computational_gas_limit, + ); let mut secondary_storage = RocksdbStorage::new(self.db_dir.path()); let mut conn = self.pool.access_storage_tagged("state_keeper").await; @@ -109,18 +102,12 @@ impl Tester { // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden // for the test pool (see the doc-comment on `TestPool` for details). 
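Before a batch starts, the executor warms a local secondary storage from Postgres so that execution reads are served from the local copy. A stand-in sketch of that warm-up step; the types here are hypothetical simplifications of `RocksdbStorage` and the DAL, not the real API:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for Postgres access and the RocksDB-backed cache.
struct Postgres {
    committed_state: Vec<(String, String)>,
}

struct SecondaryStorage {
    cache: HashMap<String, String>,
}

impl SecondaryStorage {
    fn new() -> Self {
        Self { cache: HashMap::new() }
    }

    // Pull the latest committed entries so batch execution reads locally.
    fn update_from_postgres(&mut self, pg: &Postgres) {
        for (key, value) in &pg.committed_state {
            self.cache.insert(key.clone(), value.clone());
        }
    }
}

fn main() {
    let pg = Postgres {
        committed_state: vec![("balance:alice".into(), "100".into())],
    };
    let mut secondary = SecondaryStorage::new();
    secondary.update_from_postgres(&pg);
    assert_eq!(secondary.cache.get("balance:alice").map(String::as_str), Some("100"));
}
```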
BatchExecutorHandle::new( - VmVersion::latest(), self.config.save_call_traces, self.config.max_allowed_tx_gas_limit.into(), - self.config.validation_computational_gas_limit, secondary_storage, - L1BatchParams { - context_mode: block_context, - properties: block_properties, - base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(), - protocol_version: ProtocolVersionId::latest(), - }, - self.config.vm_gas_limit, + l1_batch, + system_env, + self.config.upload_witness_inputs_to_gcs, ) } @@ -129,26 +116,17 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (BlockContextMode, BlockProperties) { - let block_properties = default_block_properties(); - - let context = BlockContext { - block_number: l1_batch_number.0, - block_timestamp: timestamp, - l1_gas_price: 1, - fair_l2_gas_price: 1, - operator_address: self.fee_account, - }; - let derived_context = DerivedBlockContext { - context, - base_fee: 1, - }; - - let previous_block_hash = U256::zero(); // Not important in this context. - ( - BlockContextMode::NewBlock(derived_context, previous_block_hash), - block_properties, - ) + validation_computational_gas_limit: u32, + ) -> (L1BatchEnv, SystemEnv) { + let mut system_params = default_system_env(); + if let Some(vm_gas_limit) = self.config.vm_gas_limit { + system_params.gas_limit = vm_gas_limit; + } + system_params.default_validation_computational_gas_limit = + validation_computational_gas_limit; + let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); + batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. + (batch_params, system_params) } /// Performs the genesis in the storage. @@ -159,6 +137,7 @@ impl Tester { &mut storage, self.fee_account, CHAIN_ID, + ProtocolVersionId::latest(), &BASE_SYSTEM_CONTRACTS, &get_system_smart_contracts(), Default::default(), @@ -195,123 +174,64 @@ impl Tester { } } -/// Test account that maintains its own nonce and is able to encode common transaction types useful for tests. -#[derive(Debug)] -pub(super) struct Account { - pub pk: H256, - pub nonce: Nonce, -} - -impl Account { - pub(super) fn random() -> Self { - Self { - pk: H256::random(), - nonce: Nonce(0), - } - } - - /// Returns the address of the account. - pub(super) fn address(&self) -> Address { - PackedEthSignature::address_from_private_key(&self.pk).unwrap() - } +pub trait AccountLoadNextExecutable { + fn deploy_loadnext_tx(&mut self) -> DeployContractsTx; + fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction; /// Returns a valid `execute` transaction. /// Automatically increments nonce of the account. - pub(super) fn execute(&mut self) -> Transaction { - self.execute_with_gas_limit(1_000_000) - } - + fn execute(&mut self) -> Transaction; + fn loadnext_custom_writes_call( + &mut self, + address: Address, + writes: u32, + gas_limit: u32, + ) -> Transaction; /// Returns a valid `execute` transaction. /// Automatically increments nonce of the account. - pub(super) fn execute_with_gas_limit(&mut self, gas_limit: u32) -> Transaction { - let fee = fee(gas_limit); - let mut l2_tx = L2Tx::new_signed( - Address::random(), - vec![], - self.nonce, - fee, - Default::default(), - CHAIN_ID, - &self.pk, - None, - Default::default(), - ) - .unwrap(); - // Input means all transaction data (NOT calldata, but all tx fields) that came from the API. - // This input will be used for the derivation of the tx hash, so put some random to it to be sure - // that the transaction hash is unique. 
- l2_tx.set_input(H256::random().0.to_vec(), H256::random()); - - // Increment the account nonce. - self.nonce += 1; + fn execute_with_gas_limit(&mut self, gas_limit: u32) -> Transaction; + /// Returns a transaction to the loadnext contract with custom gas limit and expected burned gas amount. + /// Increments the account nonce. + fn loadnext_custom_gas_call( + &mut self, + address: Address, + gas_to_burn: u32, + gas_limit: u32, + ) -> Transaction; +} - l2_tx.into() +impl AccountLoadNextExecutable for Account { + fn deploy_loadnext_tx(&mut self) -> DeployContractsTx { + let loadnext_contract = get_loadnext_contract(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + self.get_deploy_tx_with_factory_deps( + &loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps.clone(), + TxType::L2, + ) } - - /// Returns a valid `execute` transaction initiated from L1. - /// Does not increment nonce. - pub(super) fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction { - let execute = Execute { - contract_address: Address::random(), - value: Default::default(), - calldata: vec![], - factory_deps: None, - }; - - let max_fee_per_gas = U256::from(1u32); - let gas_limit = U256::from(100_100); - let priority_op_data = L1TxCommonData { - sender: self.address(), - canonical_tx_hash: H256::from_low_u64_be(serial_id.0), - serial_id, - deadline_block: 100000, - layer_2_tip_fee: U256::zero(), - full_fee: U256::zero(), - gas_limit, - max_fee_per_gas, - op_processing_type: OpProcessingType::Common, - priority_queue_type: PriorityQueueType::Deque, - eth_hash: H256::random(), - eth_block: 1, - gas_per_pubdata_limit: U256::from(800), - to_mint: gas_limit * max_fee_per_gas + execute.value, - refund_recipient: self.address(), - }; - - let tx = L1Tx { - common_data: priority_op_data, - execute, - received_timestamp_ms: 0, - }; - tx.into() + fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction { + self.get_l1_tx( + Execute { + contract_address: Address::random(), + value: Default::default(), + calldata: vec![], + factory_deps: None, + }, + serial_id.0, + ) } - /// Returns the transaction to deploy the loadnext contract and address of this contract (after deployment). - /// Increments the account nonce. - pub(super) fn deploy_loadnext_tx(&mut self) -> (Transaction, Address) { - let TestContract { - bytecode, - factory_deps, - .. - } = get_loadnext_contract(); - let loadnext_deploy_tx = get_deploy_tx( - self.pk, - self.nonce, - &bytecode, - factory_deps, - &encode(&[Token::Uint(U256::from(1000))]), - fee(500_000_000), - ); - let test_contract_address = - get_create_zksync_address(loadnext_deploy_tx.initiator_account(), self.nonce); - self.nonce += 1; - - (loadnext_deploy_tx.into(), test_contract_address) + /// Returns a valid `execute` transaction. + /// Automatically increments nonce of the account. + fn execute(&mut self) -> Transaction { + self.execute_with_gas_limit(1_000_000) } /// Returns a transaction to the loadnext contract with custom amount of write requests. /// Increments the account nonce. 
-    pub(super) fn loadnext_custom_writes_call(
+    fn loadnext_custom_writes_call(
         &mut self,
         address: Address,
         writes: u32,
@@ -324,36 +244,60 @@ impl Account {

         let fee = fee(minimal_fee + gas_limit);

-        let tx = mock_loadnext_test_call(
-            self.pk,
-            self.nonce,
-            address,
-            fee,
-            LoadnextContractExecutionParams {
-                reads: 100,
-                writes: writes as usize,
-                events: 100,
-                hashes: 100,
-                recursive_calls: 0,
-                deploys: 100,
+        self.get_l2_tx_for_execute(
+            Execute {
+                contract_address: address,
+                calldata: LoadnextContractExecutionParams {
+                    reads: 100,
+                    writes: writes as usize,
+                    events: 100,
+                    hashes: 100,
+                    recursive_calls: 0,
+                    deploys: 100,
+                }
+                .to_bytes(),
+                value: Default::default(),
+                factory_deps: None,
             },
-        );
-        self.nonce += 1;
-        tx.into()
+            Some(fee),
+        )
+    }
+
+    /// Returns a valid `execute` transaction.
+    /// Automatically increments nonce of the account.
+    fn execute_with_gas_limit(&mut self, gas_limit: u32) -> Transaction {
+        let fee = fee(gas_limit);
+        self.get_l2_tx_for_execute(
+            Execute {
+                contract_address: Address::random(),
+                calldata: vec![],
+                value: Default::default(),
+                factory_deps: None,
+            },
+            Some(fee),
+        )
     }

     /// Returns a transaction to the loadnext contract with custom gas limit and expected burned gas amount.
     /// Increments the account nonce.
-    pub(super) fn loadnext_custom_gas_call(
+    fn loadnext_custom_gas_call(
         &mut self,
         address: Address,
         gas_to_burn: u32,
         gas_limit: u32,
     ) -> Transaction {
         let fee = fee(gas_limit);
-        let tx = mock_loadnext_gas_burn_call(self.pk, self.nonce, address, fee, gas_to_burn);
-        self.nonce += 1;
-        tx.into()
+        let calldata = mock_loadnext_gas_burn_calldata(gas_to_burn);
+
+        self.get_l2_tx_for_execute(
+            Execute {
+                contract_address: address,
+                calldata,
+                value: Default::default(),
+                factory_deps: None,
+            },
+            Some(fee),
+        )
     }
 }

@@ -365,3 +309,14 @@ fn fee(gas_limit: u32) -> Fee {
         gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA),
     }
 }
+
+pub fn mock_loadnext_gas_burn_calldata(gas: u32) -> Vec<u8> {
+    let loadnext_contract = get_loadnext_contract();
+
+    let contract_function = loadnext_contract.contract.function("burnGas").unwrap();
+
+    let params = vec![Token::Uint(U256::from(gas))];
+    contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters")
+}
diff --git a/core/bin/zksync_core/src/state_keeper/extractors.rs b/core/bin/zksync_core/src/state_keeper/extractors.rs
index 5da6836ad2d2..f4fc397911aa 100644
--- a/core/bin/zksync_core/src/state_keeper/extractors.rs
+++ b/core/bin/zksync_core/src/state_keeper/extractors.rs
@@ -8,9 +8,8 @@ use std::{
     time::{Duration, Instant},
 };

-use vm::transaction_data::TransactionData;
 use zksync_dal::StorageProcessor;
-use zksync_types::{L1BatchNumber, Transaction, U256};
+use zksync_types::{L1BatchNumber, U256};
 use zksync_utils::h256_to_u256;

 /// Displays a Unix timestamp (seconds since epoch) in human-readable form. Useful for logging.
@@ -64,7 +63,7 @@ async fn wait_for_l1_batch_params_unchecked(
         .get_l1_batch_state_root_and_timestamp(number)
         .await;
     if let Some((root_hash, timestamp)) = data {
-        vlog::trace!(
+        tracing::trace!(
             "Waiting for hash of L1 batch #{number} took {:?}",
             stage_started_at.elapsed()
         );
@@ -74,9 +73,3 @@ async fn wait_for_l1_batch_params_unchecked(
         tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await;
     }
 }
-
-/// Returns size in VM words of an encoded transaction.
-pub(super) fn encoded_transaction_size(tx: Transaction) -> usize {
-    let tx_data: TransactionData = tx.into();
-    tx_data.into_tokens().len()
-}
diff --git a/core/bin/zksync_core/src/state_keeper/io/common.rs b/core/bin/zksync_core/src/state_keeper/io/common.rs
index 8547f8481c38..e527bd159462 100644
--- a/core/bin/zksync_core/src/state_keeper/io/common.rs
+++ b/core/bin/zksync_core/src/state_keeper/io/common.rs
@@ -1,50 +1,60 @@
 use std::time::Duration;

-use vm::{
-    vm_with_bootloader::{BlockContext, BlockContextMode},
-    zk_evm::block_properties::BlockProperties,
-};
+use vm::{constants::BLOCK_GAS_LIMIT, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode};
 use zksync_contracts::BaseSystemContracts;
 use zksync_dal::StorageProcessor;
-use zksync_types::{Address, L1BatchNumber, ProtocolVersionId, U256, ZKPORTER_IS_AVAILABLE};
-use zksync_utils::h256_to_u256;
-
-use itertools::Itertools;
+use zksync_types::{
+    Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, U256,
+    ZKPORTER_IS_AVAILABLE,
+};
+use zksync_utils::u256_to_h256;

-use super::{L1BatchParams, PendingBatchData};
+use super::PendingBatchData;
 use crate::state_keeper::extractors;

 /// Returns the parameters required to initialize the VM for the next L1 batch.
 #[allow(clippy::too_many_arguments)]
 pub(crate) fn l1_batch_params(
     current_l1_batch_number: L1BatchNumber,
-    operator_address: Address,
+    fee_account: Address,
     l1_batch_timestamp: u64,
-    previous_block_hash: U256,
+    previous_batch_hash: U256,
     l1_gas_price: u64,
     fair_l2_gas_price: u64,
+    first_miniblock_number: MiniblockNumber,
+    prev_miniblock_hash: H256,
     base_system_contracts: BaseSystemContracts,
+    validation_computational_gas_limit: u32,
     protocol_version: ProtocolVersionId,
-) -> L1BatchParams {
-    let block_properties = BlockProperties {
-        default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash),
-        zkporter_is_available: ZKPORTER_IS_AVAILABLE,
-    };
-
-    let context = BlockContext {
-        block_number: current_l1_batch_number.0,
-        block_timestamp: l1_batch_timestamp,
-        l1_gas_price,
-        fair_l2_gas_price,
-        operator_address,
-    };
-
-    L1BatchParams {
-        context_mode: BlockContextMode::NewBlock(context.into(), previous_block_hash),
-        properties: block_properties,
-        base_system_contracts,
-        protocol_version,
-    }
+    virtual_blocks: u32,
+    chain_id: L2ChainId,
+) -> (SystemEnv, L1BatchEnv) {
+    (
+        SystemEnv {
+            zk_porter_available: ZKPORTER_IS_AVAILABLE,
+            version: protocol_version,
+            base_system_smart_contracts: base_system_contracts,
+            gas_limit: BLOCK_GAS_LIMIT,
+            execution_mode: TxExecutionMode::VerifyExecute,
+            default_validation_computational_gas_limit: validation_computational_gas_limit,
+            chain_id,
+        },
+        L1BatchEnv {
+            previous_batch_hash: Some(u256_to_h256(previous_batch_hash)),
+            number: current_l1_batch_number,
+            timestamp: l1_batch_timestamp,
+            l1_gas_price,
+            fair_l2_gas_price,
+            fee_account,
+            enforced_base_fee: None,
+            first_l2_block: L2BlockEnv {
+                number: first_miniblock_number.0,
+                timestamp: l1_batch_timestamp,
+                prev_block_hash: prev_miniblock_hash,
+                max_virtual_blocks_to_create: virtual_blocks,
+            },
+        },
+    )
 }

 /// Returns the amount of iterations `delay_interval` fits into `max_wait`, rounding up.
@@ -61,6 +71,8 @@ pub(crate) async fn load_pending_batch(
     storage: &mut StorageProcessor<'_>,
     current_l1_batch_number: L1BatchNumber,
     fee_account: Address,
+    validation_computational_gas_limit: u32,
+    chain_id: L2ChainId,
 ) -> Option<PendingBatchData> {
     // If pending miniblock doesn't exist, it means that there is no unsynced state (i.e.
no transaction // were executed after the last sealed batch). @@ -77,10 +89,18 @@ pub(crate) async fn load_pending_batch( .get_miniblock_header(pending_miniblock_number) .await?; - vlog::info!("Getting previous batch hash"); + tracing::info!("Getting previous batch hash"); let (previous_l1_batch_hash, _) = extractors::wait_for_prev_l1_batch_params(storage, current_l1_batch_number).await; + tracing::info!("Getting previous miniblock hash"); + let prev_miniblock_hash = storage + .blocks_dal() + .get_miniblock_header(pending_miniblock_number - 1) + .await + .unwrap() + .hash; + let base_system_contracts = storage .storage_dal() .get_base_system_contracts( @@ -93,18 +113,23 @@ pub(crate) async fn load_pending_batch( ) .await; - vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); - let params = l1_batch_params( + tracing::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); + let (system_env, l1_batch_env) = l1_batch_params( current_l1_batch_number, fee_account, pending_miniblock_header.timestamp, previous_l1_batch_hash, pending_miniblock_header.l1_gas_price, pending_miniblock_header.l2_fair_gas_price, + pending_miniblock_number, + prev_miniblock_hash, base_system_contracts, + validation_computational_gas_limit, pending_miniblock_header .protocol_version .expect("`protocol_version` must be set for pending miniblock"), + pending_miniblock_header.virtual_blocks, + chain_id, ); let pending_miniblocks = storage @@ -113,71 +138,12 @@ pub(crate) async fn load_pending_batch( .await; Some(PendingBatchData { - params, + l1_batch_env, + system_env, pending_miniblocks, }) } -/// Sets missing initial writes indices. -pub async fn set_missing_initial_writes_indices(storage: &mut StorageProcessor<'_>) { - // Indices should start from 1, that's why default is (1, 0). - let (mut next_index, start_from_batch) = storage - .storage_logs_dedup_dal() - .max_set_enumeration_index() - .await - .map(|(index, l1_batch_number)| (index + 1, l1_batch_number + 1)) - .unwrap_or((1, L1BatchNumber(0))); - - let sealed_batch = storage.blocks_dal().get_sealed_l1_batch_number().await; - if start_from_batch > sealed_batch { - vlog::info!("All indices for initial writes are already set, no action is needed"); - return; - } else { - let batches_count = sealed_batch.0 - start_from_batch.0 + 1; - if batches_count > 100 { - vlog::warn!("There are {batches_count} batches to set indices for, it may take substantial time."); - } - } - - vlog::info!( - "Last set index {}. Starting migration from batch {start_from_batch}", - next_index - 1 - ); - let mut current_l1_batch = start_from_batch; - loop { - if current_l1_batch > storage.blocks_dal().get_sealed_l1_batch_number().await { - break; - } - vlog::info!("Setting indices for batch {current_l1_batch}"); - - let (hashed_keys, _): (Vec<_>, Vec<_>) = storage - .storage_logs_dedup_dal() - .initial_writes_for_batch(current_l1_batch) - .await - .into_iter() - .unzip(); - let storage_keys = storage - .storage_logs_dal() - .resolve_hashed_keys(&hashed_keys) - .await; - - // Sort storage key alphanumerically and assign indices. 
- let indexed_keys: Vec<_> = storage_keys - .into_iter() - .sorted() - .enumerate() - .map(|(pos, key)| (key.hashed_key(), next_index + pos as u64)) - .collect(); - storage - .storage_logs_dedup_dal() - .set_indices_for_initial_writes(&indexed_keys) - .await; - - next_index += indexed_keys.len() as u64; - current_l1_batch += 1; - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/bin/zksync_core/src/state_keeper/io/mempool.rs b/core/bin/zksync_core/src/state_keeper/io/mempool.rs index 6c431dc6396f..32eb379b5f16 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mempool.rs @@ -7,17 +7,18 @@ use std::{ time::{Duration, Instant}, }; -use vm::{ - vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, DerivedBlockContext}, - VmBlockResult, -}; +use vm::{utils::fee::derive_base_fee_and_gas_per_pubdata, FinishedL1Batch, L1BatchEnv, SystemEnv}; + use zksync_config::configs::chain::StateKeeperConfig; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; +use zksync_object_store::ObjectStoreFactory; use zksync_types::{ - protocol_version::ProtocolUpgradeTx, Address, L1BatchNumber, MiniblockNumber, + block::MiniblockHeader, protocol_version::ProtocolUpgradeTx, + witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, U256, }; +// TODO (SMA-1206): use seconds instead of milliseconds. use zksync_utils::time::millis_since_epoch; use crate::{ @@ -26,7 +27,7 @@ use crate::{ extractors, io::{ common::{l1_batch_params, load_pending_batch, poll_iters}, - L1BatchParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, + MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, updates::UpdatesManager, @@ -34,6 +35,8 @@ use crate::{ }, }; +use super::MiniblockParams; + /// Mempool-based IO for the state keeper. /// Receives transactions from the database through the mempool filtering logic. /// Decides which batch parameters should be used for the new batch. @@ -48,10 +51,15 @@ pub(crate) struct MempoolIO { current_l1_batch_number: L1BatchNumber, fee_account: Address, fair_l2_gas_price: u64, + validation_computational_gas_limit: u32, delay_interval: Duration, // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. l1_gas_price_provider: Arc, l2_erc20_bridge_addr: Address, + chain_id: L2ChainId, + + virtual_blocks_interval: u32, + virtual_blocks_per_miniblock: u32, } #[async_trait] @@ -68,28 +76,40 @@ impl StateKeeperIO for MempoolIO< let mut storage = self.pool.access_storage_tagged("state_keeper").await; let PendingBatchData { - params, + l1_batch_env, + system_env, pending_miniblocks, - } = load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account) - .await?; + } = load_pending_batch( + &mut storage, + self.current_l1_batch_number, + self.fee_account, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await?; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. 
-        let context = params.context_mode.inner_block_context().context;
-        let (base_fee, gas_per_pubdata) =
-            derive_base_fee_and_gas_per_pubdata(context.l1_gas_price, context.fair_l2_gas_price);
+        let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(
+            l1_batch_env.l1_gas_price,
+            l1_batch_env.fair_l2_gas_price,
+        );
         self.filter = L2TxFilter {
-            l1_gas_price: context.l1_gas_price,
+            l1_gas_price: l1_batch_env.l1_gas_price,
             fee_per_gas: base_fee,
             gas_per_pubdata: gas_per_pubdata as u32,
         };

         Some(PendingBatchData {
-            params,
+            l1_batch_env,
+            system_env,
             pending_miniblocks,
         })
     }

-    async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams> {
+    async fn wait_for_new_batch_params(
+        &mut self,
+        max_wait: Duration,
+    ) -> Option<(SystemEnv, L1BatchEnv)> {
         let deadline = Instant::now() + max_wait;

         // Block until at least one transaction in the mempool can match the filter (or timeout happens).
@@ -105,7 +125,13 @@
         }

         let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await;
-        let prev_miniblock_timestamp = self.load_previous_miniblock_timestamp().await;
+
+        let MiniblockHeader {
+            timestamp: prev_miniblock_timestamp,
+            hash: prev_miniblock_hash,
+            ..
+        } = self.load_previous_miniblock_header().await;
+
         // We cannot create two L1 batches or miniblocks with the same timestamp (forbidden by the bootloader).
         // Hence, we wait until the current timestamp is larger than the timestamp of the previous miniblock.
         // We can use `timeout_at` since `sleep_past` is cancel-safe; it only uses `sleep()` async calls.
@@ -115,7 +141,7 @@
         );
         let current_timestamp = current_timestamp.await.ok()?;

-        vlog::info!(
+        tracing::info!(
            "(l1_gas_price, fair_l2_gas_price) for L1 batch #{} is ({}, {})",
            self.current_l1_batch_number.0,
            self.filter.l1_gas_price,
@@ -126,6 +152,7 @@
             .protocol_versions_dal()
             .base_system_contracts_by_timestamp(current_timestamp)
             .await;
+
         return Some(l1_batch_params(
             self.current_l1_batch_number,
             self.fee_account,
@@ -133,25 +160,39 @@
             prev_l1_batch_hash,
             self.filter.l1_gas_price,
             self.fair_l2_gas_price,
+            self.current_miniblock_number,
+            prev_miniblock_hash,
             base_system_contracts,
+            self.validation_computational_gas_limit,
             protocol_version,
+            self.get_virtual_blocks_count(true, self.current_miniblock_number.0),
+            self.chain_id,
         ));
         }
         None
     }

+    // Returns the pair of timestamp and the number of virtual blocks to be produced in this miniblock
     async fn wait_for_new_miniblock_params(
         &mut self,
         max_wait: Duration,
         prev_miniblock_timestamp: u64,
-    ) -> Option<u64> {
+    ) -> Option<MiniblockParams> {
         // We must provide different timestamps for each miniblock.
         // If miniblock sealing interval is greater than 1 second then `sleep_past` won't actually sleep.
-        let current_timestamp = tokio::time::timeout(
+        let timestamp = tokio::time::timeout(
             max_wait,
             sleep_past(prev_miniblock_timestamp, self.current_miniblock_number),
-        );
-        current_timestamp.await.ok()
+        )
+        .await
+        .ok()?;
+
+        let virtual_blocks = self.get_virtual_blocks_count(false, self.current_miniblock_number.0);
+
+        Some(MiniblockParams {
+            timestamp,
+            virtual_blocks,
+        })
     }

     async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
@@ -192,7 +233,7 @@
         // Mark tx as rejected in the storage.
         let mut storage = self.pool.access_storage_tagged("state_keeper").await;
         metrics::increment_counter!("server.state_keeper.rejected_transactions");
-        vlog::warn!(
+        tracing::warn!(
             "transaction {} is rejected with error {}",
             rejected.hash(),
             error
@@ -215,13 +256,14 @@

     async fn seal_l1_batch(
         &mut self,
-        block_result: VmBlockResult,
+        witness_block_state: Option<WitnessBlockState>,
         updates_manager: UpdatesManager,
-        block_context: DerivedBlockContext,
+        l1_batch_env: &L1BatchEnv,
+        finished_batch: FinishedL1Batch,
     ) {
         assert_eq!(
             updates_manager.batch_timestamp(),
-            block_context.context.block_timestamp,
+            l1_batch_env.timestamp,
            "Batch timestamps don't match, batch number {}",
             self.current_l1_batch_number()
         );
@@ -229,15 +271,38 @@

         // We cannot start sealing an L1 batch until we've sealed all miniblocks included in it.
         self.miniblock_sealer_handle.wait_for_all_commands().await;

+        if let Some(witness_witness_block_state) = witness_block_state {
+            let object_store = ObjectStoreFactory::from_env().create_store().await;
+            let mut upload_successful_metric = 1.0;
+            match object_store
+                .put(self.current_l1_batch_number(), &witness_witness_block_state)
+                .await
+            {
+                Ok(path) => {
+                    tracing::debug!("Successfully uploaded witness block start state to Object Store to path = '{path}'");
+                }
+                Err(e) => {
+                    upload_successful_metric = 0.0;
+                    tracing::error!(
+                        "Failed to upload witness block start state to Object Store: {e:?}"
+                    );
+                }
+            }
+            metrics::histogram!(
+                "mempool.witness_block_start_upload_success",
+                upload_successful_metric
+            );
+        }
+
         let pool = self.pool.clone();
         let mut storage = pool.access_storage_tagged("state_keeper").await;
+
         updates_manager
             .seal_l1_batch(
                 &mut storage,
                 self.current_miniblock_number,
-                self.current_l1_batch_number,
-                block_result,
-                block_context,
+                l1_batch_env,
+                finished_batch,
                 self.l2_erc20_bridge_addr,
             )
             .await;
@@ -274,7 +339,7 @@ async fn sleep_past(timestamp: u64, miniblock: MiniblockNumber) -> u64 {
     match timestamp.cmp(&current_timestamp) {
         cmp::Ordering::Less => return current_timestamp,
         cmp::Ordering::Equal => {
-            vlog::info!(
+            tracing::info!(
                 "Current timestamp {} for miniblock #{miniblock} is equal to previous miniblock timestamp; waiting until \
                  timestamp increases",
                 extractors::display_timestamp(current_timestamp)
@@ -284,7 +349,7 @@
             // This situation can be triggered if the system keeper is started on a pod with a different
             // system time, or if it is buggy. Thus, a one-time error could require no actions if L1 batches
             // are expected to be generated frequently.
- vlog::error!( + tracing::error!( "Previous miniblock timestamp {} is larger than the current timestamp {} for miniblock #{miniblock}", extractors::display_timestamp(timestamp), extractors::display_timestamp(current_timestamp) @@ -313,6 +378,7 @@ async fn sleep_past(timestamp: u64, miniblock: MiniblockNumber) -> u64 { } impl MempoolIO { + #[allow(clippy::too_many_arguments)] pub(in crate::state_keeper) async fn new( mempool: MempoolGuard, miniblock_sealer_handle: MiniblockSealerHandle, @@ -321,10 +387,22 @@ impl MempoolIO { config: &StateKeeperConfig, delay_interval: Duration, l2_erc20_bridge_addr: Address, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, ) -> Self { + assert!( + config.virtual_blocks_interval > 0, + "Virtual blocks interval must be positive" + ); + assert!( + config.virtual_blocks_per_miniblock > 0, + "Virtual blocks per miniblock must be positive" + ); + let mut storage = pool.access_storage_tagged("state_keeper").await; let last_sealed_l1_batch_header = storage.blocks_dal().get_newest_l1_batch_header().await; let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await; + drop(storage); Self { @@ -337,14 +415,18 @@ impl MempoolIO { current_miniblock_number: last_miniblock_number + 1, fee_account: config.fee_account_addr, fair_l2_gas_price: config.fair_l2_gas_price, + validation_computational_gas_limit, delay_interval, l1_gas_price_provider, l2_erc20_bridge_addr, + chain_id, + virtual_blocks_interval: config.virtual_blocks_interval, + virtual_blocks_per_miniblock: config.virtual_blocks_per_miniblock, } } async fn load_previous_l1_batch_hash(&self) -> U256 { - vlog::info!( + tracing::info!( "Getting previous L1 batch hash for L1 batch #{}", self.current_l1_batch_number ); @@ -359,28 +441,42 @@ impl MempoolIO { "server.state_keeper.wait_for_prev_hash_time", stage_started_at.elapsed() ); - vlog::info!( + tracing::info!( "Got previous L1 batch hash: {batch_hash:0>64x} for L1 batch #{}", self.current_l1_batch_number ); batch_hash } - async fn load_previous_miniblock_timestamp(&self) -> u64 { + async fn load_previous_miniblock_header(&self) -> MiniblockHeader { let stage_started_at: Instant = Instant::now(); let mut storage = self.pool.access_storage_tagged("state_keeper").await; - let miniblock_timestamp = storage + let miniblock_header = storage .blocks_dal() - .get_miniblock_timestamp(self.current_miniblock_number - 1) + .get_miniblock_header(self.current_miniblock_number - 1) .await .expect("Previous miniblock must be sealed and header saved to DB"); metrics::histogram!( - "server.state_keeper.get_prev_miniblock_timestamp", + "server.state_keeper.load_previous_miniblock_header", stage_started_at.elapsed() ); - miniblock_timestamp + miniblock_header + } + + /// "virtual_blocks_per_miniblock" will be created either if the miniblock_number % virtual_blocks_interval == 0 or + /// the miniblock is the first one in the batch. + /// For instance: + /// 1) If we want to have virtual block speed the same as the batch speed, virtual_block_interval = 10^9 and virtual_blocks_per_miniblock = 1 + /// 2) If we want to have roughly 1 virtual block per 2 miniblocks, we need to have virtual_block_interval = 2, and virtual_blocks_per_miniblock = 1 + /// 3) If we want to have 4 virtual blocks per miniblock, we need to have virtual_block_interval = 1, and virtual_blocks_per_miniblock = 4. 
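To make the three configurations above concrete, here is a self-contained restatement of the selection rule from `get_virtual_blocks_count` just below (a sketch: `virtual_blocks_for` is a hypothetical free function, not part of this patch):

fn virtual_blocks_for(interval: u32, per_miniblock: u32, first_in_batch: bool, miniblock: u32) -> u32 {
    // `per_miniblock` virtual blocks are produced on the first miniblock of a batch
    // and on every miniblock whose number is divisible by `interval`; otherwise none.
    if first_in_batch || miniblock % interval == 0 {
        per_miniblock
    } else {
        0
    }
}

fn main() {
    // 1) interval = 10^9, per_miniblock = 1: virtual blocks advance at batch speed.
    assert_eq!(virtual_blocks_for(1_000_000_000, 1, true, 17), 1);
    assert_eq!(virtual_blocks_for(1_000_000_000, 1, false, 17), 0);
    // 2) interval = 2, per_miniblock = 1: roughly one virtual block per two miniblocks.
    assert_eq!(virtual_blocks_for(2, 1, false, 8), 1);
    assert_eq!(virtual_blocks_for(2, 1, false, 9), 0);
    // 3) interval = 1, per_miniblock = 4: four virtual blocks per miniblock.
    assert_eq!(virtual_blocks_for(1, 4, false, 9), 4);
}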
+ fn get_virtual_blocks_count(&self, first_in_batch: bool, miniblock_number: u32) -> u32 { + if first_in_batch || miniblock_number % self.virtual_blocks_interval == 0 { + return self.virtual_blocks_per_miniblock; + } + + 0 } } diff --git a/core/bin/zksync_core/src/state_keeper/io/mod.rs b/core/bin/zksync_core/src/state_keeper/io/mod.rs index 6cd6bdab6358..69bf61055afe 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mod.rs @@ -6,11 +6,11 @@ use std::{ time::{Duration, Instant}, }; -use vm::vm_with_bootloader::{BlockContextMode, DerivedBlockContext}; -use vm::zk_evm::block_properties::BlockProperties; -use vm::VmBlockResult; -use zksync_contracts::BaseSystemContracts; +use vm::FinishedL1Batch; +use vm::{L1BatchEnv, SystemEnv}; + use zksync_dal::ConnectionPool; +use zksync_types::witness_block_state::WitnessBlockState; use zksync_types::{ block::MiniblockReexecuteData, protocol_version::ProtocolUpgradeTx, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, @@ -27,17 +27,6 @@ use super::updates::{MiniblockSealCommand, UpdatesManager}; #[cfg(test)] mod tests; -/// System parameters for L1 batch. -/// It includes system params such as Basic System Contracts and zkPorter configuration -/// and l1batch-specific parameters like timestamp, number, etc. -#[derive(Debug, Clone)] -pub struct L1BatchParams { - pub context_mode: BlockContextMode, - pub properties: BlockProperties, - pub base_system_contracts: BaseSystemContracts, - pub protocol_version: ProtocolVersionId, -} - /// Contains information about the un-synced execution state: /// Batch data and transactions that were executed before and are marked as so in the DB, /// but aren't a part of a sealed batch. @@ -50,11 +39,27 @@ pub struct L1BatchParams { pub struct PendingBatchData { /// Data used to initialize the pending batch. We have to make sure that all the parameters /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. - pub(crate) params: L1BatchParams, + pub(crate) l1_batch_env: L1BatchEnv, + pub(crate) system_env: SystemEnv, /// List of miniblocks and corresponding transactions that were executed within batch. pub(crate) pending_miniblocks: Vec, } +#[derive(Debug, Copy, Clone, Default)] +pub struct MiniblockParams { + /// The timestamp of the miniblock + pub(crate) timestamp: u64, + /// The maximal number of virtual blocks that can be created within this miniblock. + /// During the migration from displaying users batch.number to L2 block (i.e. miniblock) number in Q3 2023 + /// in order to make the process smoother for users, we temporarily display the virtual blocks for users. + /// + /// Virtual blocks start their number with batch number and will increase until they reach the miniblock number. + /// Note that it is the *maximal* number of virtual blocks that can be created within this miniblock since + /// once the virtual blocks' number reaches the miniblock number, they will never be allowed to exceed those, i.e. + /// any "excess" created blocks will be ignored. + pub(crate) virtual_blocks: u32, +} + /// `StateKeeperIO` provides the interactive layer for the state keeper: /// it's used to receive volatile parameters (such as batch parameters), and also it's used to perform /// mutable operations on the persistent state (e.g. persist executed batches). 
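A minimal sketch of the capping rule described in the `MiniblockParams` doc above, assuming plain `u64` counters (the helper name `advance_virtual_block` is hypothetical and not part of this patch):

fn advance_virtual_block(current: u64, max_to_create: u64, miniblock_number: u64) -> u64 {
    // Virtual blocks grow by at most `max_to_create` per miniblock and are never
    // allowed to overtake the real miniblock number; any excess is ignored.
    (current + max_to_create).min(miniblock_number)
}

For example, with `current = 98`, `max_to_create = 4`, and `miniblock_number = 100`, the result is 100: only two of the four allowed virtual blocks are created and the excess is dropped.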
@@ -69,14 +74,16 @@ pub trait StateKeeperIO: 'static + Send {
     async fn load_pending_batch(&mut self) -> Option<PendingBatchData>;
     /// Blocks for up to `max_wait` until the parameters for the next L1 batch are available.
     /// Returns the data required to initialize the VM for the next batch.
-    async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams>;
+    async fn wait_for_new_batch_params(
+        &mut self,
+        max_wait: Duration,
+    ) -> Option<(SystemEnv, L1BatchEnv)>;
     /// Blocks for up to `max_wait` until the parameters for the next miniblock are available.
-    /// Right now it's only a timestamp.
     async fn wait_for_new_miniblock_params(
         &mut self,
         max_wait: Duration,
         prev_miniblock_timestamp: u64,
-    ) -> Option<u64>;
+    ) -> Option<MiniblockParams>;
     /// Blocks for up to `max_wait` until the next transaction is available for execution.
     /// Returns `None` if no transaction became available until the timeout.
     async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction>;
@@ -90,9 +97,10 @@ pub trait StateKeeperIO: 'static + Send {
     /// Marks the L1 batch as sealed.
     async fn seal_l1_batch(
         &mut self,
-        block_result: VmBlockResult,
+        witness_block_state: Option<WitnessBlockState>,
         updates_manager: UpdatesManager,
-        block_context: DerivedBlockContext,
+        l1_batch_env: &L1BatchEnv,
+        finished_batch: FinishedL1Batch,
     );
     /// Loads protocol version of the previous l1 batch.
     async fn load_previous_batch_version_id(&mut self) -> Option<ProtocolVersionId>;
@@ -136,7 +144,7 @@ impl MiniblockSealerHandle {
     /// enough of them are processed (i.e., there is backpressure).
     pub async fn submit(&mut self, command: MiniblockSealCommand) {
         let miniblock_number = command.miniblock_number;
-        vlog::debug!(
+        tracing::debug!(
            "Enqueuing sealing command for miniblock #{miniblock_number} with #{} txs (L1 batch #{})",
             command.miniblock.executed_transactions.len(),
             command.l1_batch_number
@@ -156,7 +164,7 @@

         let elapsed = start.elapsed();
         let queue_capacity = self.commands_sender.capacity();
-        vlog::debug!(
+        tracing::debug!(
             "Enqueued sealing command for miniblock #{miniblock_number} (took {elapsed:?}; \
              available queue capacity: {queue_capacity})"
         );
@@ -178,7 +186,7 @@

     /// Waits until all previously submitted commands are fully processed by the sealer.
     pub async fn wait_for_all_commands(&mut self) {
-        vlog::debug!(
+        tracing::debug!(
             "Requested waiting for miniblock seal queue to empty; current available capacity: {}",
             self.commands_sender.capacity()
         );
@@ -190,7 +198,7 @@
         }

         let elapsed = start.elapsed();
-        vlog::debug!("Miniblock seal queue is emptied (took {elapsed:?})");
+        tracing::debug!("Miniblock seal queue is emptied (took {elapsed:?})");

         // Since this method called from outside is essentially a no-op if `self.is_sync`,
         // we don't report its metrics in this case.
@@ -245,16 +253,16 @@ impl MiniblockSealer {

     /// Seals miniblocks as they are received from the [`MiniblockSealerHandle`]. This should be run
     /// on a separate Tokio task.
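How the sealer and its handle above are meant to be wired together, per the doc comments; a sketch only, since the constructor returning a `(sealer, handle)` pair is not shown in this hunk and its exact shape is assumed here:

// Sketch (assumed constructor; `submit`, `wait_for_all_commands`, and `run`
// are taken from the code above):
//
//     let (sealer, mut sealer_handle) = MiniblockSealer::new(pool, capacity);
//     tokio::spawn(sealer.run());                  // sealer loop on its own Tokio task
//     sealer_handle.submit(seal_command).await;    // blocks when the queue is full (backpressure)
//     sealer_handle.wait_for_all_commands().await; // barrier used before sealing the L1 batch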
-    pub async fn run(mut self) {
+    pub async fn run(mut self) -> anyhow::Result<()> {
         if self.is_sync {
-            vlog::info!("Starting synchronous miniblock sealer");
+            tracing::info!("Starting synchronous miniblock sealer");
         } else if let Some(sender) = self.commands_sender.upgrade() {
-            vlog::info!(
+            tracing::info!(
                 "Starting async miniblock sealer with queue capacity {}",
                 sender.max_capacity()
             );
         } else {
-            vlog::warn!("Miniblock sealer not started, since its handle is already dropped");
+            tracing::warn!("Miniblock sealer not started, since its handle is already dropped");
         }

         let mut miniblock_seal_delta: Option<Instant> = None;
@@ -271,16 +279,17 @@
             completable.completion_sender.send(()).ok();
             // ^ We don't care whether anyone listens to the processing progress
         }
+        Ok(())
     }

     async fn next_command(&mut self) -> Option<Completable<MiniblockSealCommand>> {
-        vlog::debug!("Polling miniblock seal queue for next command");
+        tracing::debug!("Polling miniblock seal queue for next command");

         let start = Instant::now();
         let command = self.commands_receiver.recv().await;
         let elapsed = start.elapsed();

         if let Some(completable) = &command {
-            vlog::debug!(
+            tracing::debug!(
                 "Received command to seal miniblock #{} (polling took {elapsed:?})",
                 completable.command.miniblock_number
             );
diff --git a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs
index 8136581108fc..0c3933a0562b 100644
--- a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs
+++ b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs
@@ -8,34 +8,32 @@ use std::{
     time::{Duration, Instant},
 };

-use vm::{
-    vm_with_bootloader::{
-        get_bootloader_memory, BlockContextMode, DerivedBlockContext, TxExecutionMode,
-    },
-    VmBlockResult,
-};
+use vm::{FinishedL1Batch, L1BatchEnv};
+
 use zksync_config::constants::ACCOUNT_CODE_STORAGE_ADDRESS;
 use zksync_dal::StorageProcessor;
+
+use zksync_types::{
+    block::unpack_block_info, CURRENT_VIRTUAL_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_ADDRESS,
+};
 use zksync_types::{
     block::{L1BatchHeader, MiniblockHeader},
     event::{extract_added_tokens, extract_long_l2_to_l1_messages},
     l2_to_l1_log::L2ToL1Log,
+    storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator},
     tx::{
         tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation,
         TransactionExecutionResult,
     },
-    zk_evm::aux_structures::LogQuery,
     zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries,
-    AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, MiniblockNumber, StorageKey,
-    StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, H256, U256,
+    AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, LogQuery, MiniblockNumber,
+    StorageKey, StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, H256,
 };
-use zksync_utils::{miniblock_hash, time::millis_since_epoch, u256_to_h256};
+// TODO (SMA-1206): use seconds instead of milliseconds.
+use zksync_utils::{h256_to_u256, time::millis_since_epoch, u256_to_h256}; -use crate::state_keeper::{ - extractors, - io::common::set_missing_initial_writes_indices, - updates::{L1BatchUpdates, MiniblockSealCommand, UpdatesManager}, -}; +use crate::state_keeper::extractors; +use crate::state_keeper::updates::{MiniblockSealCommand, UpdatesManager}; #[derive(Debug, Clone, Copy)] struct SealProgressMetricNames { @@ -92,7 +90,9 @@ impl SealProgress { let elapsed = self.stage_start.elapsed(); if elapsed > MIN_STAGE_DURATION_TO_REPORT { let target = self.metric_names.target; - vlog::debug!("{target} execution stage {stage} took {elapsed:?} with count {count:?}"); + tracing::debug!( + "{target} execution stage {stage} took {elapsed:?} with count {count:?}" + ); } let (l1_batch_labels, miniblock_labels); @@ -128,32 +128,20 @@ impl UpdatesManager { mut self, storage: &mut StorageProcessor<'_>, current_miniblock_number: MiniblockNumber, - current_l1_batch_number: L1BatchNumber, - block_result: VmBlockResult, - block_context: DerivedBlockContext, + l1_batch_env: &L1BatchEnv, + finished_batch: FinishedL1Batch, l2_erc20_bridge_addr: Address, ) { let started_at = Instant::now(); let mut progress = SealProgress::for_l1_batch(); let mut transaction = storage.start_transaction().await; - // The vm execution was paused right after the last transaction was executed. - // There is some post-processing work that the VM needs to do before the block is fully processed. - let VmBlockResult { - full_result, - block_tip_result, - } = block_result; - assert!( - full_result.revert_reason.is_none(), - "VM must not revert when finalizing block. Revert reason: {:?}", - full_result.revert_reason - ); progress.end_stage("vm_finalization", None); - self.extend_from_fictive_transaction(block_tip_result.logs); + self.extend_from_fictive_transaction(finished_batch.block_tip_execution_result); // Seal fictive miniblock with last events and storage logs. 
let miniblock_command = self.seal_miniblock_command( - current_l1_batch_number, + l1_batch_env.number, current_miniblock_number, l2_erc20_bridge_addr, ); @@ -161,7 +149,8 @@ impl UpdatesManager { progress.end_stage("fictive_miniblock", None); let (_, deduped_log_queries) = sort_storage_access_queries( - full_result + finished_batch + .final_execution_state .storage_log_queries .iter() .map(|log| &log.log_query), @@ -169,55 +158,57 @@ impl UpdatesManager { progress.end_stage("log_deduplication", Some(deduped_log_queries.len())); let (l1_tx_count, l2_tx_count) = l1_l2_tx_count(&self.l1_batch.executed_transactions); - let (writes_count, reads_count) = - storage_log_query_write_read_counts(&full_result.storage_log_queries); + let (writes_count, reads_count) = storage_log_query_write_read_counts( + &finished_batch.final_execution_state.storage_log_queries, + ); let (dedup_writes_count, dedup_reads_count) = log_query_write_read_counts(deduped_log_queries.iter()); - vlog::info!( + + tracing::info!( "Sealing L1 batch {current_l1_batch_number} with {total_tx_count} \ ({l2_tx_count} L2 + {l1_tx_count} L1) txs, {l2_to_l1_log_count} l2_l1_logs, \ {event_count} events, {reads_count} reads ({dedup_reads_count} deduped), \ {writes_count} writes ({dedup_writes_count} deduped)", total_tx_count = l1_tx_count + l2_tx_count, - l2_to_l1_log_count = full_result.l2_to_l1_logs.len(), - event_count = full_result.events.len() + l2_to_l1_log_count = finished_batch.final_execution_state.l2_to_l1_logs.len(), + event_count = finished_batch.final_execution_state.events.len(), + current_l1_batch_number = l1_batch_env.number ); - let (prev_hash, prev_timestamp) = - extractors::wait_for_prev_l1_batch_params(&mut transaction, current_l1_batch_number) - .await; - let timestamp = block_context.context.block_timestamp; + let (_prev_hash, prev_timestamp) = + extractors::wait_for_prev_l1_batch_params(&mut transaction, l1_batch_env.number).await; assert!( - prev_timestamp < timestamp, + prev_timestamp < l1_batch_env.timestamp, "Cannot seal L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ meaning that L1 batch will be rejected by the bootloader", - current_l1_batch_number, + l1_batch_env.number, extractors::display_timestamp(prev_timestamp), - extractors::display_timestamp(timestamp) + extractors::display_timestamp(l1_batch_env.timestamp) ); let l1_batch = L1BatchHeader { - number: current_l1_batch_number, + number: l1_batch_env.number, is_finished: true, - timestamp, - fee_account_address: block_context.context.operator_address, + timestamp: l1_batch_env.timestamp, + fee_account_address: l1_batch_env.fee_account, priority_ops_onchain_data: self.l1_batch.priority_ops_onchain_data.clone(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, - l2_to_l1_logs: full_result.l2_to_l1_logs, - l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), + l2_to_l1_logs: finished_batch.final_execution_state.l2_to_l1_logs, + l2_to_l1_messages: extract_long_l2_to_l1_messages( + &finished_batch.final_execution_state.events, + ), bloom: Default::default(), - used_contract_hashes: full_result.used_contract_hashes, - base_fee_per_gas: block_context.base_fee, + used_contract_hashes: finished_batch.final_execution_state.used_contract_hashes, + base_fee_per_gas: l1_batch_env.base_fee(), l1_gas_price: self.l1_gas_price(), l2_fair_gas_price: self.fair_l2_gas_price(), base_system_contracts_hashes: self.base_system_contract_hashes(), protocol_version: Some(self.protocol_version()), }; - 
let block_context_properties = BlockContextMode::NewBlock(block_context, prev_hash); let initial_bootloader_contents = - Self::initial_bootloader_memory(&self.l1_batch, block_context_properties); + finished_batch.final_bootloader_memory.unwrap_or_default(); transaction .blocks_dal() @@ -231,14 +222,14 @@ impl UpdatesManager { transaction .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(current_l1_batch_number) + .mark_miniblocks_as_executed_in_l1_batch(l1_batch_env.number) .await; progress.end_stage("set_l1_batch_number_for_miniblocks", None); transaction .transactions_dal() .mark_txs_as_executed_in_l1_batch( - current_l1_batch_number, + l1_batch_env.number, &self.l1_batch.executed_transactions, ) .await; @@ -249,7 +240,7 @@ impl UpdatesManager { .partition(|log_query| log_query.rw_flag); transaction .storage_logs_dedup_dal() - .insert_protective_reads(current_l1_batch_number, &protective_reads) + .insert_protective_reads(l1_batch_env.number, &protective_reads) .await; progress.end_stage("insert_protective_reads", Some(protective_reads.len())); @@ -276,13 +267,9 @@ impl UpdatesManager { }) .collect(); - // One-time migration completion for initial writes' indices. - set_missing_initial_writes_indices(&mut transaction).await; - progress.end_stage("set_missing_initial_writes_indices", None); - transaction .storage_logs_dedup_dal() - .insert_initial_writes(current_l1_batch_number, &written_storage_keys) + .insert_initial_writes(l1_batch_env.number, &written_storage_keys) .await; progress.end_stage("insert_initial_writes", Some(deduplicated_writes.len())); @@ -299,43 +286,12 @@ impl UpdatesManager { self.report_l1_batch_metrics( started_at, - current_l1_batch_number, - timestamp, + l1_batch_env.number, + l1_batch_env.timestamp, &writes_metrics, ); } - fn initial_bootloader_memory( - updates_accumulator: &L1BatchUpdates, - block_context: BlockContextMode, - ) -> Vec<(usize, U256)> { - let transactions_data = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.transaction.clone().into()) - .collect(); - - let refunds = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.operator_suggested_refund) - .collect(); - - let compressed_bytecodes = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.compressed_bytecodes.clone()) - .collect(); - - get_bootloader_memory( - transactions_data, - refunds, - compressed_bytecodes, - TxExecutionMode::VerifyExecute, - block_context, - ) - } - fn report_l1_batch_metrics( &self, started_at: Instant, @@ -368,7 +324,7 @@ impl UpdatesManager { "server.state_keeper.l1_batch.sealed_time", started_at.elapsed(), ); - vlog::debug!( + tracing::debug!( "Sealed L1 batch {current_l1_batch_number} in {:?}", started_at.elapsed() ); @@ -400,7 +356,7 @@ impl MiniblockSealCommand { let (l1_tx_count, l2_tx_count) = l1_l2_tx_count(&self.miniblock.executed_transactions); let (writes_count, reads_count) = storage_log_query_write_read_counts(&self.miniblock.storage_logs); - vlog::info!( + tracing::info!( "Sealing miniblock {miniblock_number} (L1 batch {l1_batch_number}) \ with {total_tx_count} ({l2_tx_count} L2 + {l1_tx_count} L1) txs, {event_count} events, \ {reads_count} reads, {writes_count} writes", @@ -412,14 +368,15 @@ impl MiniblockSealCommand { let miniblock_header = MiniblockHeader { number: miniblock_number, timestamp: self.miniblock.timestamp, - hash: miniblock_hash(miniblock_number), + hash: self.miniblock.get_miniblock_hash(), l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, 
             base_fee_per_gas: self.base_fee_per_gas,
             l1_gas_price: self.l1_gas_price,
             l2_fair_gas_price: self.fair_l2_gas_price,
             base_system_contracts_hashes: self.base_system_contracts_hashes,
-            protocol_version: Some(self.protocol_version),
+            protocol_version: self.protocol_version,
+            virtual_blocks: self.miniblock.virtual_blocks,
         };

         transaction
@@ -441,7 +398,7 @@
             Some(self.miniblock.executed_transactions.len()),
         );

-        let write_logs = self.extract_write_logs(is_fictive);
+        let write_logs = self.extract_deduplicated_write_logs(is_fictive);
         let write_log_count = write_logs.iter().map(|(_, logs)| logs.len()).sum();
         transaction
@@ -502,9 +459,20 @@
             .await;
         progress.end_stage("insert_l2_to_l1_logs", Some(l2_to_l1_log_count));

+        let current_l2_virtual_block_info = transaction
+            .storage_dal()
+            .get_by_key(&StorageKey::new(
+                AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+                CURRENT_VIRTUAL_BLOCK_INFO_POSITION,
+            ))
+            .await
+            .unwrap_or_default();
+        let (current_l2_virtual_block_number, _) =
+            unpack_block_info(h256_to_u256(current_l2_virtual_block_info));
+
         transaction.commit().await;
         progress.end_stage("commit_miniblock", None);

-        self.report_miniblock_metrics(started_at);
+        self.report_miniblock_metrics(started_at, current_l2_virtual_block_number);
     }

     /// Performs several sanity checks to make sure that the miniblock is valid.
@@ -529,25 +497,39 @@
         }
     }

-    fn extract_write_logs(&self, is_fictive: bool) -> Vec<(H256, Vec<StorageLog>)> {
-        let logs = self.miniblock.storage_logs.iter();
-        let grouped_logs = logs.group_by(|log| log.log_query.tx_number_in_block);
+    fn extract_deduplicated_write_logs(&self, is_fictive: bool) -> Vec<(H256, Vec<StorageLog>)> {
+        let mut storage_writes_deduplicator = StorageWritesDeduplicator::new();
+        storage_writes_deduplicator.apply(
+            self.miniblock
+                .storage_logs
+                .iter()
+                .filter(|log| log.log_query.rw_flag),
+        );
+        let deduplicated_logs = storage_writes_deduplicator.into_modified_key_values();

-        let grouped_logs = grouped_logs.into_iter().map(|(tx_index, logs)| {
-            let tx_hash = if is_fictive {
-                assert_eq!(tx_index as usize, self.first_tx_index);
-                H256::zero()
-            } else {
-                self.transaction(tx_index as usize).hash()
-            };
-            let logs = logs.filter_map(|log| {
-                log.log_query
-                    .rw_flag
-                    .then(|| StorageLog::from_log_query(log))
-            });
-            (tx_hash, logs.collect())
-        });
-        grouped_logs.collect()
+        deduplicated_logs
+            .into_iter()
+            .map(|(key, ModifiedSlot { value, tx_index })| (tx_index, (key, value)))
+            .sorted_by_key(|(tx_index, _)| *tx_index)
+            .group_by(|(tx_index, _)| *tx_index)
+            .into_iter()
+            .map(|(tx_index, logs)| {
+                let tx_hash = if is_fictive {
+                    assert_eq!(tx_index as usize, self.first_tx_index);
+                    H256::zero()
+                } else {
+                    self.transaction(tx_index as usize).hash()
+                };
+                (
+                    tx_hash,
+                    logs.into_iter()
+                        .map(|(_, (key, value))| {
+                            StorageLog::new_write_log(key, u256_to_h256(value))
+                        })
+                        .collect(),
+                )
+            })
+            .collect()
     }

     fn transaction(&self, index: usize) -> &Transaction {
@@ -562,6 +544,7 @@
         for (key, (_, value)) in unique_updates {
             if *key.account().address() == ACCOUNT_CODE_STORAGE_ADDRESS {
                 let bytecode_hash = *value;
+                // TODO(SMA-1554): Support contracts deletion.
// For now, we expected that if the `bytecode_hash` is zero, the contract was not deployed // in the first place, so we don't do anything if bytecode_hash != H256::zero() { @@ -611,7 +594,7 @@ impl MiniblockSealCommand { }) } - fn report_miniblock_metrics(&self, started_at: Instant) { + fn report_miniblock_metrics(&self, started_at: Instant, latest_virtual_block_number: u64) { let miniblock_number = self.miniblock_number; metrics::histogram!( @@ -634,8 +617,13 @@ impl MiniblockSealCommand { miniblock_number.0 as f64, "stage" => "sealed" ); + metrics::gauge!( + "server.miniblock.virtual_block_number", + latest_virtual_block_number as f64, + "stage" => "sealed" + ); - vlog::debug!( + tracing::debug!( "Sealed miniblock {miniblock_number} in {:?}", started_at.elapsed() ); diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs index 1968f6b54b7a..8f8f894ae93a 100644 --- a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs @@ -3,22 +3,25 @@ use futures::FutureExt; use std::time::Duration; use db_test_macro::db_test; -use vm::vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, BlockContextMode}; + +use vm::utils::fee::derive_base_fee_and_gas_per_pubdata; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{ block::BlockGasCount, tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, - MiniblockNumber, ProtocolVersionId, StorageKey, StorageLog, VmEvent, H256, U256, + MiniblockNumber, ProtocolVersionId, StorageKey, VmEvent, H256, U256, }; use zksync_utils::time::seconds_since_epoch; +use crate::state_keeper::tests::{create_l1_batch_metadata, default_l1_batch_env}; + use crate::state_keeper::{ - io::{common::set_missing_initial_writes_indices, MiniblockSealer, StateKeeperIO}, + io::{MiniblockParams, MiniblockSealer, StateKeeperIO}, mempool_actor::l2_tx_filter, tests::{ - create_execution_result, create_l1_batch_metadata, create_transaction, - create_updates_manager, default_block_context, default_vm_block_result, Query, + create_execution_result, create_transaction, create_updates_manager, + default_vm_block_result, Query, }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }; @@ -151,7 +154,7 @@ async fn test_timestamps_are_distinct( .wait_for_new_batch_params(Duration::from_secs(10)) .await .expect("No batch params in the test mempool"); - assert!(batch_params.context_mode.timestamp() > prev_miniblock_timestamp); + assert!(batch_params.1.timestamp > prev_miniblock_timestamp); } #[db_test] @@ -182,7 +185,8 @@ async fn l1_batch_timestamp_respects_prev_miniblock_with_clock_skew( #[db_test] async fn processing_storage_logs_when_sealing_miniblock(connection_pool: ConnectionPool) { - let mut miniblock = MiniblockUpdates::new(0); + let mut miniblock = + MiniblockUpdates::new(0, 1, H256::zero(), 1, Some(ProtocolVersionId::latest())); let tx = create_transaction(10, 100); let storage_logs = [ @@ -204,6 +208,7 @@ async fn processing_storage_logs_when_sealing_miniblock(connection_pool: Connect BlockGasCount::default(), ExecutionMetrics::default(), vec![], + vec![], ); let tx = create_transaction(10, 100); @@ -221,6 +226,7 @@ async fn processing_storage_logs_when_sealing_miniblock(connection_pool: Connect BlockGasCount::default(), ExecutionMetrics::default(), vec![], + vec![], ); let l1_batch_number = L1BatchNumber(2); @@ -233,12 +239,12 @@ async fn 
processing_storage_logs_when_sealing_miniblock(connection_pool: Connect fair_l2_gas_price: 100, base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), - protocol_version: ProtocolVersionId::default(), + protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), }; let mut conn = connection_pool.access_storage_tagged("state_keeper").await; conn.protocol_versions_dal() - .save_protocol_version(Default::default()) + .save_protocol_version_with_tx(Default::default()) .await; seal_command.seal(&mut conn).await; @@ -270,7 +276,8 @@ async fn processing_storage_logs_when_sealing_miniblock(connection_pool: Connect #[db_test] async fn processing_events_when_sealing_miniblock(pool: ConnectionPool) { let l1_batch_number = L1BatchNumber(2); - let mut miniblock = MiniblockUpdates::new(0); + let mut miniblock = + MiniblockUpdates::new(0, 1, H256::zero(), 1, Some(ProtocolVersionId::latest())); let events = (0_u8..10).map(|i| VmEvent { location: (l1_batch_number, u32::from(i / 4)), @@ -282,13 +289,14 @@ async fn processing_events_when_sealing_miniblock(pool: ConnectionPool) { for (i, events_chunk) in events.chunks(4).enumerate() { let tx = create_transaction(10, 100); let mut execution_result = create_execution_result(i as u16, []); - execution_result.result.logs.events = events_chunk.to_vec(); + execution_result.logs.events = events_chunk.to_vec(); miniblock.extend_from_executed_transaction( tx, execution_result, BlockGasCount::default(), ExecutionMetrics::default(), vec![], + vec![], ); } @@ -302,12 +310,12 @@ async fn processing_events_when_sealing_miniblock(pool: ConnectionPool) { fair_l2_gas_price: 100, base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), - protocol_version: ProtocolVersionId::default(), + protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), }; let mut conn = pool.access_storage_tagged("state_keeper").await; conn.protocol_versions_dal() - .save_protocol_version(Default::default()) + .save_protocol_version_with_tx(Default::default()) .await; seal_command.seal(&mut conn).await; @@ -344,13 +352,11 @@ async fn test_miniblock_and_l1_batch_processing( .create_test_mempool_io(pool.clone(), miniblock_sealer_capacity) .await; - let mut block_context = default_block_context(); - block_context.context.block_timestamp = 100; // change timestamp to pass monotonicity check - let block_context_mode = BlockContextMode::NewBlock(block_context, 0.into()); + let l1_batch_env = default_l1_batch_env(0, 1, Address::random()); let mut updates = UpdatesManager::new( - &block_context_mode, + l1_batch_env, BaseSystemContractsHashes::default(), - ProtocolVersionId::default(), + ProtocolVersionId::latest(), ); let tx = create_transaction(10, 100); @@ -360,13 +366,19 @@ async fn test_miniblock_and_l1_batch_processing( vec![], BlockGasCount::default(), ExecutionMetrics::default(), + vec![], ); mempool.seal_miniblock(&updates).await; - updates.push_miniblock(1); + updates.push_miniblock(MiniblockParams { + timestamp: 1, + virtual_blocks: 1, + }); - let block_result = default_vm_block_result(); + let finished_batch = default_vm_block_result(); + + let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); mempool - .seal_l1_batch(block_result, updates, block_context) + .seal_l1_batch(None, updates, &l1_batch_env, finished_batch) .await; // Check that miniblock #1 and L1 batch #1 are persisted. 
@@ -471,123 +483,6 @@ async fn miniblock_sealer_handle_parallel_processing(pool: ConnectionPool) {
     sealer_handle.wait_for_all_commands().await;
 }

-#[db_test]
-async fn initial_writes_index_migration(pool: ConnectionPool) {
-    let tester = Tester::new();
-
-    // Genesis is needed for proper mempool initialization.
-    tester.genesis(&pool).await;
-    let (last_index, _) = {
-        let mut storage = pool.access_storage().await;
-        storage
-            .storage_logs_dedup_dal()
-            .max_set_enumeration_index()
-            .await
-            .unwrap()
-    };
-
-    tester.insert_miniblock(&pool, 1, 100, 100, 100).await;
-    tester.insert_sealed_batch(&pool, 1).await;
-    let keys1: Vec<_> = vec![2u64, 3, 5, 7]
-        .into_iter()
-        .map(|k| {
-            StorageKey::new(
-                AccountTreeId::new(Address::from_low_u64_be(1)),
-                H256::from_low_u64_be(k),
-            )
-        })
-        .collect();
-    let storage_logs: Vec<_> = keys1
-        .iter()
-        .map(|k| StorageLog::new_write_log(*k, H256::random()))
-        .collect();
-    {
-        let mut storage = pool.access_storage().await;
-        storage
-            .storage_logs_dal()
-            .insert_storage_logs(1u32.into(), &[(H256::zero(), storage_logs)])
-            .await;
-        storage
-            .storage_logs_dedup_dal()
-            .insert_initial_writes(1u32.into(), &keys1)
-            .await;
-    }
-
-    tester.insert_miniblock(&pool, 2, 100, 100, 100).await;
-    tester.insert_sealed_batch(&pool, 2).await;
-    let keys2: Vec<_> = vec![1u64, 4, 6, 8]
-        .into_iter()
-        .map(|k| {
-            StorageKey::new(
-                AccountTreeId::new(Address::from_low_u64_be(1)),
-                H256::from_low_u64_be(k),
-            )
-        })
-        .collect();
-    let storage_logs: Vec<_> = keys2
-        .iter()
-        .map(|k| StorageLog::new_write_log(*k, H256::random()))
-        .collect();
-    {
-        let mut storage = pool.access_storage().await;
-        storage
-            .storage_logs_dal()
-            .insert_storage_logs(2u32.into(), &[(H256::zero(), storage_logs)])
-            .await;
-        storage
-            .storage_logs_dedup_dal()
-            .insert_initial_writes(2u32.into(), &keys2)
-            .await;
-    }
-
-    let expected: Vec<_> = keys1
-        .iter()
-        .chain(&keys2)
-        .enumerate()
-        .map(|(i, k)| (k.hashed_key(), i as u64 + last_index + 1))
-        .collect();
-    let actual = {
-        let mut storage = pool.access_storage().await;
-        let iw1 = storage
-            .storage_logs_dedup_dal()
-            .initial_writes_for_batch(1u32.into())
-            .await;
-        let iw2 = storage
-            .storage_logs_dedup_dal()
-            .initial_writes_for_batch(2u32.into())
-            .await;
-
-        iw1.into_iter()
-            .chain(iw2)
-            .map(|(key, index)| (key, index.unwrap()))
-            .collect::<Vec<_>>()
-    };
-    assert_eq!(expected, actual);
-
-    {
-        let mut storage = pool.access_storage().await;
-        storage.storage_logs_dedup_dal().reset_indices().await;
-        set_missing_initial_writes_indices(&mut storage).await;
-    };
-    let actual = {
-        let mut storage = pool.access_storage().await;
-        let iw1 = storage
-            .storage_logs_dedup_dal()
-            .initial_writes_for_batch(1u32.into())
-            .await;
-        let iw2 = storage
-            .storage_logs_dedup_dal()
-            .initial_writes_for_batch(2u32.into())
-            .await;
-
-        iw1.into_iter()
-            .chain(iw2)
-            .map(|(key, index)| (key, index.unwrap()))
-            .collect::<Vec<_>>()
-    };
-    assert_eq!(expected, actual);
-}
-
 /// Ensure that subsequent miniblocks that belong to the same L1 batch have different timestamps
 #[db_test]
 async fn different_timestamp_for_miniblocks_in_same_batch(connection_pool: ConnectionPool) {
@@ -597,7 +492,10 @@
     tester.genesis(&connection_pool).await;
     let (mut mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await;
     let current_timestamp = seconds_since_epoch();
-    let next_timestamp = mempool
+    let MiniblockParams {
+        timestamp: next_timestamp,
+        ..
+ } = mempool .wait_for_new_miniblock_params(Duration::from_secs(10), current_timestamp) .await .unwrap(); diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs index 80211e012694..d495bb90dc8a 100644 --- a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs @@ -1,6 +1,7 @@ //! Testing harness for the IO. use std::{sync::Arc, time::Duration}; +use vm::constants::BLOCK_GAS_LIMIT; use zksync_config::configs::chain::StateKeeperConfig; use zksync_config::GasAdjusterConfig; @@ -76,6 +77,8 @@ impl Tester { fair_l2_gas_price: self.fair_l2_gas_price(), bootloader_hash: base_contract_hashes.bootloader, default_aa_hash: base_contract_hashes.default_aa, + virtual_blocks_interval: 1, + virtual_blocks_per_miniblock: 1, ..StateKeeperConfig::default() }; let l2_erc20_bridge_addr = Address::repeat_byte(0x5a); // Isn't relevant. @@ -87,6 +90,8 @@ impl Tester { &config, Duration::from_secs(1), l2_erc20_bridge_addr, + BLOCK_GAS_LIMIT, + L2ChainId(270), ) .await; @@ -104,6 +109,7 @@ impl Tester { &mut storage, Address::repeat_byte(0x01), L2ChainId(270), + ProtocolVersionId::latest(), &self.base_system_contracts, &get_system_smart_contracts(), L1VerifierConfig::default(), @@ -135,6 +141,7 @@ impl Tester { l2_fair_gas_price, base_system_contracts_hashes: self.base_system_contracts.hashes(), protocol_version: Some(ProtocolVersionId::latest()), + virtual_blocks: 0, }) .await; } diff --git a/core/bin/zksync_core/src/state_keeper/keeper.rs b/core/bin/zksync_core/src/state_keeper/keeper.rs index b96a841508c2..32fbe6f94f03 100644 --- a/core/bin/zksync_core/src/state_keeper/keeper.rs +++ b/core/bin/zksync_core/src/state_keeper/keeper.rs @@ -2,18 +2,21 @@ use tokio::sync::watch; use std::time::{Duration, Instant}; -use vm::TxRevertReason; use zksync_types::{ block::MiniblockReexecuteData, l2::TransactionType, protocol_version::ProtocolUpgradeTx, - storage_writes_deduplicator::StorageWritesDeduplicator, - tx::tx_execution_info::TxExecutionStatus, Transaction, + storage_writes_deduplicator::StorageWritesDeduplicator, Transaction, }; +use vm::{Halt, L1BatchEnv, SystemEnv}; + use crate::gas_tracker::gas_count_from_writes; + +use crate::state_keeper::io::MiniblockParams; + use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, L1BatchExecutorBuilder, TxExecutionResult}, extractors, - io::{L1BatchParams, PendingBatchData, StateKeeperIO}, + io::{PendingBatchData, StateKeeperIO}, seal_criteria::{SealData, SealManager, SealResolution}, types::ExecutionMetricsForCriteria, updates::UpdatesManager, @@ -59,21 +62,22 @@ impl ZkSyncStateKeeper { } } - pub async fn run(mut self) { + pub async fn run(mut self) -> anyhow::Result<()> { match self.run_inner().await { Ok(()) => { // Normally, state keeper can only exit its routine if the task was cancelled. - panic!("State keeper exited the main loop") + anyhow::bail!("State keeper exited the main loop"); } Err(Canceled) => { - vlog::info!("Stop signal received, state keeper is shutting down"); + tracing::info!("Stop signal received, state keeper is shutting down"); + Ok(()) } } } /// Fallible version of `run` routine that allows to easily exit upon cancellation. async fn run_inner(&mut self) -> Result<(), Canceled> { - vlog::info!( + tracing::info!( "Starting state keeper. 
Next l1 batch to seal: {}, Next miniblock to seal: {}", self.io.current_l1_batch_number(), self.io.current_miniblock_number() @@ -81,11 +85,12 @@ impl ZkSyncStateKeeper { // Re-execute pending batch if it exists. Otherwise, initialize a new batch. let PendingBatchData { - params, + mut l1_batch_env, + mut system_env, pending_miniblocks, } = match self.io.load_pending_batch().await { Some(params) => { - vlog::info!( + tracing::info!( "There exists a pending batch consisting of {} miniblocks, the first one is {}", params.pending_miniblocks.len(), params @@ -97,43 +102,33 @@ impl ZkSyncStateKeeper { params } None => { - vlog::info!("There is no open pending batch, starting a new empty batch"); + tracing::info!("There is no open pending batch, starting a new empty batch"); + let (system_env, l1_batch_env) = self.wait_for_new_batch_params().await?; PendingBatchData { - params: self.wait_for_new_batch_params().await?, + l1_batch_env, pending_miniblocks: Vec::new(), + system_env, } } }; - let mut l1_batch_params = params; + let protocol_version = system_env.version; let mut updates_manager = UpdatesManager::new( - &l1_batch_params.context_mode, - l1_batch_params.base_system_contracts.hashes(), - l1_batch_params.protocol_version, + l1_batch_env.clone(), + system_env.base_system_smart_contracts.hashes(), + protocol_version, ); - let previous_batch_protocol_version = self.io.load_previous_batch_version_id().await; - let version_changed = match previous_batch_protocol_version { - Some(previous_batch_protocol_version) => { - l1_batch_params.protocol_version != previous_batch_protocol_version - } - // None is only the case for old blocks. Match will be removed when migration will be done. - None => false, - }; + let previous_batch_protocol_version = + self.io.load_previous_batch_version_id().await.unwrap(); + let version_changed = protocol_version != previous_batch_protocol_version; let mut protocol_upgrade_tx = if pending_miniblocks.is_empty() && version_changed { - self.io - .load_upgrade_tx(l1_batch_params.protocol_version) - .await + self.io.load_upgrade_tx(protocol_version).await } else if !pending_miniblocks.is_empty() && version_changed { // Sanity check: if `txs_to_reexecute` is not empty and upgrade tx is present for this block // then it must be the first one in `txs_to_reexecute`. - if self - .io - .load_upgrade_tx(l1_batch_params.protocol_version) - .await - .is_some() - { + if self.io.load_upgrade_tx(protocol_version).await.is_some() { let first_tx_to_reexecute = &pending_miniblocks[0].txs[0]; assert_eq!( first_tx_to_reexecute.tx_format(), @@ -148,8 +143,9 @@ impl ZkSyncStateKeeper { let mut batch_executor = self .batch_executor_base - .init_batch(l1_batch_params.clone()) + .init_batch(l1_batch_env.clone(), system_env.clone()) .await; + self.restore_state(&batch_executor, &mut updates_manager, pending_miniblocks) .await?; @@ -166,18 +162,24 @@ impl ZkSyncStateKeeper { self.io.seal_miniblock(&updates_manager).await; // We've sealed the miniblock that we had, but we still need to setup the timestamp // for the fictive miniblock. 
- let fictive_miniblock_timestamp = self
+ let new_miniblock_params = self
.wait_for_new_miniblock_params(updates_manager.miniblock.timestamp)
.await?;
- updates_manager.push_miniblock(fictive_miniblock_timestamp);
+ Self::start_next_miniblock(
+ new_miniblock_params,
+ &mut updates_manager,
+ &batch_executor,
+ )
+ .await;
}
- let block_result = batch_executor.finish_batch().await;
+ let (finished_batch, witness_block_state) = batch_executor.finish_batch().await;
let sealed_batch_protocol_version = updates_manager.protocol_version();
self.io
.seal_l1_batch(
- block_result,
+ witness_block_state,
updates_manager,
- l1_batch_params.context_mode.inner_block_context(),
+ &l1_batch_env,
+ finished_batch,
)
.await;
if let Some(delta) = l1_batch_seal_delta {
@@ -186,23 +188,21 @@
l1_batch_seal_delta = Some(Instant::now());
// Start the new batch.
- l1_batch_params = self.wait_for_new_batch_params().await?;
-
+ (system_env, l1_batch_env) = self.wait_for_new_batch_params().await?;
updates_manager = UpdatesManager::new(
- &l1_batch_params.context_mode,
- l1_batch_params.base_system_contracts.hashes(),
- l1_batch_params.protocol_version,
+ l1_batch_env.clone(),
+ system_env.base_system_smart_contracts.hashes(),
+ system_env.version,
);
batch_executor = self
.batch_executor_base
- .init_batch(l1_batch_params.clone())
+ .init_batch(l1_batch_env.clone(), system_env.clone())
.await;
- let version_changed = l1_batch_params.protocol_version != sealed_batch_protocol_version;
+ let version_changed = system_env.version != sealed_batch_protocol_version;
+
protocol_upgrade_tx = if version_changed {
- self.io
- .load_upgrade_tx(l1_batch_params.protocol_version)
- .await
+ self.io.load_upgrade_tx(system_env.version).await
} else {
None
};
@@ -216,7 +216,7 @@ impl ZkSyncStateKeeper {
Ok(())
}
- async fn wait_for_new_batch_params(&mut self) -> Result<L1BatchParams, Canceled> {
+ async fn wait_for_new_batch_params(&mut self) -> Result<(SystemEnv, L1BatchEnv), Canceled> {
let params = loop {
if let Some(params) = self.io.wait_for_new_batch_params(POLL_WAIT_DURATION).await {
break params;
@@ -229,7 +229,7 @@ impl ZkSyncStateKeeper {
async fn wait_for_new_miniblock_params(
&mut self,
prev_miniblock_timestamp: u64,
- ) -> Result<u64, Canceled> {
+ ) -> Result<MiniblockParams, Canceled> {
let params = loop {
if let Some(params) = self
.io
@@ -243,6 +243,17 @@ impl ZkSyncStateKeeper {
Ok(params)
}
+ async fn start_next_miniblock(
+ params: MiniblockParams,
+ updates_manager: &mut UpdatesManager,
+ batch_executor: &BatchExecutorHandle,
+ ) {
+ updates_manager.push_miniblock(params);
+ batch_executor
+ .start_next_miniblock(updates_manager.miniblock.get_miniblock_env())
+ .await;
+ }
+
/// Applies the "pending state" on the `UpdatesManager`.
/// Pending state means transactions that were executed before the server restart. Before we continue processing the
/// batch, we need to restore the state. We must ensure that every transaction is executed successfully.
@@ -261,20 +272,30 @@ impl ZkSyncStateKeeper {
for (index, miniblock) in miniblocks_to_reexecute.into_iter().enumerate() {
// Push any non-first miniblock to updates manager. The first one was pushed when `updates_manager` was initialized.
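Taken together, the hunks above route every miniblock transition through the new `start_next_miniblock` helper, so the VM is told about each L2 block alongside the updates manager. A condensed sketch of the resulting end-of-batch sequence — names come from this diff, error handling is elided, and this is not a verbatim method body:

```rust
// Condensed sketch: seal the last real miniblock, open the fictive one
// through the shared helper, then seal the L1 batch with the witness
// state that finish_batch() now also returns.
self.io.seal_miniblock(&updates_manager).await;
let params = self
    .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp)
    .await?;
Self::start_next_miniblock(params, &mut updates_manager, &batch_executor).await;
let (finished_batch, witness_block_state) = batch_executor.finish_batch().await;
self.io
    .seal_l1_batch(witness_block_state, updates_manager, &l1_batch_env, finished_batch)
    .await;
```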
if index > 0 { - updates_manager.push_miniblock(miniblock.timestamp); + Self::start_next_miniblock( + MiniblockParams { + timestamp: miniblock.timestamp, + virtual_blocks: miniblock.virtual_blocks, + }, + updates_manager, + batch_executor, + ) + .await; } let miniblock_number = miniblock.number; - vlog::info!( + tracing::info!( "Starting to reexecute transactions from sealed miniblock {}", miniblock_number ); for tx in miniblock.txs { let result = batch_executor.execute_tx(tx.clone()).await; + let TxExecutionResult::Success { tx_result, tx_metrics, compressed_bytecodes, + call_tracer_result, .. } = result else { @@ -290,19 +311,21 @@ impl ZkSyncStateKeeper { execution_metrics: tx_execution_metrics, } = tx_metrics; - let exec_result_status = tx_result.status; - let tx_hash = tx.hash(); - let initiator_account = tx.initiator_account(); let is_l1 = tx.is_l1(); + let exec_result_status = tx_result.result.clone(); + let initiator_account = tx.initiator_account(); + updates_manager.extend_from_executed_transaction( tx, *tx_result, compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, + call_tracer_result, ); - vlog::debug!( + + tracing::debug!( "Finished re-executing tx {tx_hash} by {initiator_account} (is_l1: {is_l1}, \ #{idx_in_l1_batch} in L1 batch {l1_batch_number}, #{idx_in_miniblock} in miniblock {miniblock_number}); \ status: {exec_result_status:?}. L1 gas spent: {tx_l1_gas_this_tx:?}, total in L1 batch: {pending_l1_gas:?}, \ @@ -317,10 +340,10 @@ impl ZkSyncStateKeeper { } // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. - let new_timestamp = self + let new_miniblock_params = self .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) .await?; - updates_manager.push_miniblock(new_timestamp); + Self::start_next_miniblock(new_miniblock_params, updates_manager, batch_executor).await; Ok(()) } @@ -342,7 +365,7 @@ impl ZkSyncStateKeeper { .sealer .should_seal_l1_batch_unconditionally(updates_manager) { - vlog::debug!( + tracing::debug!( "L1 batch #{} should be sealed unconditionally as per sealing rules", self.io.current_l1_batch_number() ); @@ -350,23 +373,24 @@ impl ZkSyncStateKeeper { } if self.sealer.should_seal_miniblock(updates_manager) { - vlog::debug!( + tracing::debug!( "Miniblock #{} (L1 batch #{}) should be sealed as per sealing rules", self.io.current_miniblock_number(), self.io.current_l1_batch_number() ); self.io.seal_miniblock(updates_manager).await; - let new_timestamp = self + let new_miniblock_params = self .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp) .await?; - vlog::debug!( + tracing::debug!( "Initialized new miniblock #{} (L1 batch #{}) with timestamp {}", self.io.current_miniblock_number(), self.io.current_l1_batch_number(), - extractors::display_timestamp(new_timestamp) + extractors::display_timestamp(new_miniblock_params.timestamp) ); - updates_manager.push_miniblock(new_timestamp); + Self::start_next_miniblock(new_miniblock_params, updates_manager, batch_executor) + .await; } let started_waiting = Instant::now(); @@ -376,7 +400,7 @@ impl ZkSyncStateKeeper { "server.state_keeper.waiting_for_tx", started_waiting.elapsed() ); - vlog::trace!("No new transactions. Waiting!"); + tracing::trace!("No new transactions. Waiting!"); continue; }; @@ -395,6 +419,7 @@ impl ZkSyncStateKeeper { let TxExecutionResult::Success { tx_result, tx_metrics, + call_tracer_result, compressed_bytecodes, .. 
} = exec_result @@ -413,6 +438,7 @@ impl ZkSyncStateKeeper { compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, + call_tracer_result, ); } SealResolution::ExcludeAndSeal => { @@ -426,7 +452,7 @@ impl ZkSyncStateKeeper { }; if seal_resolution.should_seal() { - vlog::debug!( + tracing::debug!( "L1 batch #{} should be sealed with resolution {seal_resolution:?} after executing \ transaction {tx_hash}", self.io.current_l1_batch_number() @@ -466,7 +492,7 @@ impl ZkSyncStateKeeper { // Despite success of upgrade transaction is not enforced by protocol, // we panic here because failed upgrade tx is not intended in any case. - if tx_result.status != TxExecutionStatus::Success { + if tx_result.result.is_failed() { panic!("Failed upgrade tx {:?}", tx.hash()); } @@ -481,6 +507,7 @@ impl ZkSyncStateKeeper { compressed_bytecodes, tx_l1_gas_this_tx, tx_execution_metrics, + vec![], ); } SealResolution::ExcludeAndSeal => { @@ -527,8 +554,8 @@ impl ZkSyncStateKeeper { ); SealResolution::ExcludeAndSeal } - TxExecutionResult::RejectedByVm { rejection_reason } => match rejection_reason { - TxRevertReason::NotEnoughGasProvided => { + TxExecutionResult::RejectedByVm { reason } => match reason { + Halt::NotEnoughGasProvided => { metrics::increment_counter!( "server.tx_aggregation.reason", "criterion" => "not_enough_gas_provided_to_start_tx", @@ -536,7 +563,7 @@ impl ZkSyncStateKeeper { ); SealResolution::ExcludeAndSeal } - _ => SealResolution::Unexecutable(rejection_reason.to_string()), + _ => SealResolution::Unexecutable(reason.to_string()), }, TxExecutionResult::Success { tx_result, @@ -545,13 +572,13 @@ impl ZkSyncStateKeeper { bootloader_dry_run_result, .. } => { - let tx_execution_status = tx_result.status; + let tx_execution_status = &tx_result.result; let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, } = *tx_metrics; - vlog::trace!( + tracing::trace!( "finished tx {:?} by {:?} (is_l1: {}) (#{} in l1 batch {}) (#{} in miniblock {}) \ status: {:?}. 
L1 gas spent: {:?}, total in l1 batch: {:?}, \ tx execution metrics: {:?}, block execution metrics: {:?}", @@ -574,18 +601,20 @@ impl ZkSyncStateKeeper { execution_metrics: finish_block_execution_metrics, } = *bootloader_dry_run_metrics; - let encoding_len = extractors::encoded_transaction_size(tx); + let encoding_len = tx.encoding_len(); - let logs_to_apply = tx_result.result.logs.storage_logs.iter(); - let logs_to_apply = - logs_to_apply.chain(&bootloader_dry_run_result.logs.storage_logs); + let logs_to_apply_iter = tx_result + .logs + .storage_logs + .iter() + .chain(&bootloader_dry_run_result.logs.storage_logs); let block_writes_metrics = updates_manager .storage_writes_deduplicator - .apply_and_rollback(logs_to_apply.clone()); + .apply_and_rollback(logs_to_apply_iter.clone()); let block_writes_l1_gas = gas_count_from_writes(&block_writes_metrics); let tx_writes_metrics = - StorageWritesDeduplicator::apply_on_empty_state(logs_to_apply); + StorageWritesDeduplicator::apply_on_empty_state(logs_to_apply_iter); let tx_writes_l1_gas = gas_count_from_writes(&tx_writes_metrics); let tx_gas_excluding_writes = tx_l1_gas_this_tx + finish_block_l1_gas; diff --git a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs index 30a2335ee1fd..f1d510bea0d6 100644 --- a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use std::time::Duration; use std::time::Instant; use tokio::sync::watch; -use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; +use vm::utils::fee::derive_base_fee_and_gas_per_pubdata; use zksync_config::configs::chain::MempoolConfig; use zksync_dal::ConnectionPool; @@ -57,7 +57,7 @@ impl MempoolFetcher { stuck_tx_timeout: Duration, fair_l2_gas_price: u64, stop_receiver: watch::Receiver, - ) { + ) -> anyhow::Result<()> { { let mut storage = pool.access_storage_tagged("state_keeper").await; if remove_stuck_txs { @@ -65,14 +65,14 @@ impl MempoolFetcher { .transactions_dal() .remove_stuck_txs(stuck_tx_timeout) .await; - vlog::info!("Number of stuck txs was removed: {}", removed_txs); + tracing::info!("Number of stuck txs was removed: {}", removed_txs); } storage.transactions_dal().reset_mempool().await; } loop { if *stop_receiver.borrow() { - vlog::info!("Stop signal received, mempool is shutting down"); + tracing::info!("Stop signal received, mempool is shutting down"); break; } let started_at = Instant::now(); @@ -97,5 +97,6 @@ impl MempoolFetcher { tokio::time::sleep(self.sync_interval).await; } } + Ok(()) } } diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs index 3d01aff3926d..5ccae06a3f4b 100644 --- a/core/bin/zksync_core/src/state_keeper/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/mod.rs @@ -3,11 +3,12 @@ use tokio::sync::watch; use std::sync::Arc; use zksync_config::{ - configs::chain::{MempoolConfig, StateKeeperConfig}, + configs::chain::{MempoolConfig, NetworkConfig, StateKeeperConfig}, constants::MAX_TXS_IN_BLOCK, ContractsConfig, DBConfig, }; use zksync_dal::ConnectionPool; +use zksync_types::L2ChainId; mod batch_executor; pub(crate) mod extractors; @@ -17,12 +18,11 @@ mod mempool_actor; pub(crate) mod seal_criteria; #[cfg(test)] mod tests; -mod types; +pub(crate) mod types; pub(crate) mod updates; pub use self::{ - batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder, MultiVMConfig}, - io::common::set_missing_initial_writes_indices, + 
batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder}, keeper::ZkSyncStateKeeper, seal_criteria::SealManager, }; @@ -36,6 +36,7 @@ pub(crate) async fn create_state_keeper( contracts_config: &ContractsConfig, state_keeper_config: StateKeeperConfig, db_config: &DBConfig, + network_config: &NetworkConfig, mempool_config: &MempoolConfig, pool: ConnectionPool, mempool: MempoolGuard, @@ -58,8 +59,7 @@ where pool.clone(), state_keeper_config.max_allowed_l2_tx_gas_limit.into(), state_keeper_config.save_call_traces, - state_keeper_config.validation_computational_gas_limit, - None, // MultiVM is not used on the main node, we always use the latest version. + state_keeper_config.upload_witness_inputs_to_gcs, ); let io = MempoolIO::new( @@ -70,6 +70,8 @@ where &state_keeper_config, mempool_config.delay_interval(), contracts_config.l2_erc20_bridge_addr, + state_keeper_config.validation_computational_gas_limit, + L2ChainId(network_config.zksync_network_id), ) .await; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs index 202f40670ad5..3fc378bcb6b5 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs @@ -53,7 +53,7 @@ impl ConditionalSealer { block_data: &SealData, tx_data: &SealData, ) -> SealResolution { - vlog::trace!( + tracing::trace!( "Determining seal resolution for L1 batch #{l1_batch_number} with {tx_count} transactions \ and metrics {:?}", block_data.execution_metrics @@ -72,7 +72,7 @@ impl ConditionalSealer { SealResolution::IncludeAndSeal | SealResolution::ExcludeAndSeal | SealResolution::Unexecutable(_) => { - vlog::debug!( + tracing::debug!( "L1 batch #{l1_batch_number} processed by `{name}` with resolution {seal_resolution:?}", name = sealer.prom_criterion_name() ); diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index 43c20183b333..6e5224f40326 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,7 +1,5 @@ use std::fmt; - -// Workspace uses. 
-use vm::{zk_evm::zkevm_opcode_defs::system_params::ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; +use vm::constants::{ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::{ circuit::{GEOMETRY_CONFIG, SCHEDULER_UPPER_BOUND}, diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs index d5d804fafee7..d405358ae333 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs @@ -1,4 +1,4 @@ -use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; +use vm::constants::BOOTLOADER_TX_ENCODING_SPACE; use crate::state_keeper::seal_criteria::{ SealCriterion, SealData, SealResolution, StateKeeperConfig, diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs index a6f34f90325c..a77849257e65 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -11,6 +11,7 @@ //! thus now every criterion is independent of the others. use std::fmt; +use vm::TransactionVmExt; use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::{ @@ -106,7 +107,7 @@ impl SealData { Self { execution_metrics, gas_count, - cumulative_size: extractors::encoded_transaction_size(transaction), + cumulative_size: transaction.bootloader_encoding_size(), writes_metrics, } } @@ -197,7 +198,7 @@ impl SealManager { if should_seal_timeout { metrics::increment_counter!("server.tx_aggregation.reason", "criterion" => RULE_NAME); - vlog::debug!( + tracing::debug!( "Decided to seal L1 batch using rule `{RULE_NAME}`; batch timestamp: {}, \ commit deadline: {block_commit_deadline_ms}ms", extractors::display_timestamp(manager.batch_timestamp()) @@ -282,6 +283,7 @@ mod tests { vec![], BlockGasCount::default(), ExecutionMetrics::default(), + vec![], ); } diff --git a/core/bin/zksync_core/src/state_keeper/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/tests/mod.rs index 8365d120682a..4f8f1fe364d3 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/mod.rs @@ -9,28 +9,26 @@ use std::{ }; use vm::{ - vm::{VmPartialExecutionResult, VmTxExecutionResult}, - vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - VmBlockResult, VmExecutionResult, + constants::BLOCK_GAS_LIMIT, CurrentExecutionState, ExecutionResult, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, + VmExecutionStatistics, }; use zksync_config::{configs::chain::StateKeeperConfig, constants::ZKPORTER_IS_AVAILABLE}; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_types::{ aggregated_operations::AggregatedActionType, + block::legacy_miniblock_hash, + block::miniblock_hash, block::BlockGasCount, block::MiniblockReexecuteData, commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, - tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}, - vm_trace::{VmExecutionTrace, VmTrace}, - zk_evm::aux_structures::{LogQuery, Timestamp}, - zk_evm::block_properties::BlockProperties, - Address, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, StorageLogQuery, - StorageLogQueryType, Transaction, H256, U256, + 
tx::tx_execution_info::VmExecutionLogs,
+ Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, Nonce, ProtocolVersionId,
+ StorageLogQuery, StorageLogQueryType, Timestamp, Transaction, H256, U256,
};
-use zksync_utils::h256_to_u256;
use self::tester::{
bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec,
@@ -52,10 +50,37 @@
mod tester;
pub(super) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
Lazy::new(BaseSystemContracts::load_from_disk);
-pub(super) fn default_block_properties() -> BlockProperties {
- BlockProperties {
- default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash),
- zkporter_is_available: ZKPORTER_IS_AVAILABLE,
+pub(super) fn default_system_env() -> SystemEnv {
+ SystemEnv {
+ zk_porter_available: ZKPORTER_IS_AVAILABLE,
+ version: ProtocolVersionId::latest(),
+ base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(),
+ gas_limit: BLOCK_GAS_LIMIT,
+ execution_mode: TxExecutionMode::VerifyExecute,
+ default_validation_computational_gas_limit: BLOCK_GAS_LIMIT,
+ chain_id: L2ChainId(270),
+ }
+}
+
+pub(super) fn default_l1_batch_env(
+ number: u32,
+ timestamp: u64,
+ fee_account: Address,
+) -> L1BatchEnv {
+ L1BatchEnv {
+ previous_batch_hash: None,
+ number: L1BatchNumber(number),
+ timestamp,
+ l1_gas_price: 1,
+ fair_l2_gas_price: 1,
+ fee_account,
+ enforced_base_fee: None,
+ first_l2_block: L2BlockEnv {
+ number,
+ timestamp,
+ prev_block_hash: legacy_miniblock_hash(MiniblockNumber(number - 1)),
+ max_virtual_blocks_to_create: 1,
+ },
}
}
@@ -80,51 +105,32 @@ pub(super) fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata {
}
}
-pub(super) fn default_vm_block_result() -> VmBlockResult {
- VmBlockResult {
- full_result: VmExecutionResult {
+pub(super) fn default_vm_block_result() -> FinishedL1Batch {
+ FinishedL1Batch {
+ block_tip_execution_result: VmExecutionResultAndLogs {
+ result: ExecutionResult::Success { output: vec![] },
+ logs: Default::default(),
+ statistics: Default::default(),
+ refunds: Default::default(),
+ },
+ final_execution_state: CurrentExecutionState {
events: vec![],
storage_log_queries: vec![],
used_contract_hashes: vec![],
l2_to_l1_logs: vec![],
- return_data: vec![],
- gas_used: 0,
- contracts_used: 0,
- revert_reason: None,
- trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()),
total_log_queries: 0,
cycles_used: 0,
- computational_gas_used: 0,
},
- block_tip_result: VmPartialExecutionResult {
- logs: VmExecutionLogs::default(),
- revert_reason: None,
- contracts_used: 0,
- cycles_used: 0,
- computational_gas_used: 0,
- },
- }
-}
-
-pub(super) fn default_block_context() -> DerivedBlockContext {
- DerivedBlockContext {
- context: BlockContext {
- block_number: 0,
- block_timestamp: 0,
- l1_gas_price: 0,
- fair_l2_gas_price: 0,
- operator_address: Address::default(),
- },
- base_fee: 0,
+ final_bootloader_memory: Some(vec![]),
}
}
pub(super) fn create_updates_manager() -> UpdatesManager {
- let block_context = BlockContextMode::NewBlock(default_block_context(), 0.into());
+ let l1_batch_env = default_l1_batch_env(1, 1, Address::default());
UpdatesManager::new(
- &block_context,
+ l1_batch_env,
BaseSystemContractsHashes::default(),
- ProtocolVersionId::default(),
+ ProtocolVersionId::latest(),
)
}
@@ -161,30 +167,29 @@ pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> Tran
pub(super) fn create_execution_result(
tx_number_in_block: u16,
storage_logs: impl IntoIterator,
-) -> VmTxExecutionResult {
+) -> VmExecutionResultAndLogs {
let storage_logs: Vec<_> = 
storage_logs .into_iter() .map(|(key, query)| query.into_log(key, tx_number_in_block)) .collect(); - let logs = VmExecutionLogs { - total_log_queries_count: storage_logs.len() + 2, - storage_logs, - events: vec![], - l2_to_l1_logs: vec![], - }; - VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: VmPartialExecutionResult { - logs, - revert_reason: None, + let total_log_queries = storage_logs.len() + 2; + VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: VmExecutionLogs { + events: vec![], + l2_to_l1_logs: vec![], + storage_logs, + total_log_queries_count: total_log_queries, + }, + statistics: VmExecutionStatistics { contracts_used: 0, cycles_used: 0, + gas_used: 0, computational_gas_used: 0, + total_log_queries, }, - call_traces: vec![], - gas_refunded: 0, - operator_suggested_refund: 0, + refunds: Default::default(), } } @@ -523,11 +528,15 @@ async fn pending_batch_is_applied() { MiniblockReexecuteData { number: MiniblockNumber(1), timestamp: 1, + prev_block_hash: miniblock_hash(MiniblockNumber(0), 0, H256::zero(), H256::zero()), + virtual_blocks: 1, txs: vec![random_tx(1)], }, MiniblockReexecuteData { number: MiniblockNumber(2), timestamp: 2, + prev_block_hash: miniblock_hash(MiniblockNumber(1), 1, H256::zero(), H256::zero()), + virtual_blocks: 1, txs: vec![random_tx(2)], }, ]); @@ -621,6 +630,8 @@ async fn miniblock_timestamp_after_pending_batch() { let pending_batch = pending_batch_data(vec![MiniblockReexecuteData { number: MiniblockNumber(1), timestamp: 1, + prev_block_hash: miniblock_hash(MiniblockNumber(0), 0, H256::zero(), H256::zero()), + virtual_blocks: 1, txs: vec![random_tx(1)], }]); diff --git a/core/bin/zksync_core/src/state_keeper/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/tests/tester.rs index 44b793bab345..9e8e79902d78 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/tester.rs @@ -9,23 +9,21 @@ use std::{ }; use vm::{ - vm::{VmPartialExecutionResult, VmTxExecutionResult}, - vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - VmBlockResult, + constants::BLOCK_GAS_LIMIT, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, }; use zksync_types::{ block::MiniblockReexecuteData, protocol_version::ProtocolUpgradeTx, - tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, - ProtocolVersionId, Transaction, H256, U256, + witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, + ProtocolVersionId, Transaction, H256, }; use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, - io::{L1BatchParams, PendingBatchData, StateKeeperIO}, + io::{MiniblockParams, PendingBatchData, StateKeeperIO}, seal_criteria::SealManager, tests::{ - create_l2_transaction, default_block_properties, default_vm_block_result, - BASE_SYSTEM_CONTRACTS, + create_l2_transaction, default_l1_batch_env, default_vm_block_result, BASE_SYSTEM_CONTRACTS, }, types::ExecutionMetricsForCriteria, updates::UpdatesManager, @@ -148,7 +146,7 @@ impl TestScenario { /// Accepts a function that would be given access to the received batch seal params, which can implement /// additional assertions on the sealed batch. 
pub(crate) fn batch_sealed_with< - F: FnOnce(&VmBlockResult, &UpdatesManager, &BlockContext) + Send + 'static, + F: FnOnce(&VmExecutionResultAndLogs, &UpdatesManager, &L1BatchEnv) + Send + 'static, >( mut self, description: &'static str, @@ -187,7 +185,8 @@ impl TestScenario { if sk_thread.is_finished() { sk_thread .await - .unwrap_or_else(|_| panic!("State keeper thread panicked")); + .unwrap_or_else(|_| panic!("State keeper thread panicked")) + .unwrap(); return; } tokio::time::sleep(poll_interval).await; @@ -206,25 +205,14 @@ pub(crate) fn random_tx(tx_number: u64) -> Transaction { tx.into() } -fn partial_execution_result() -> VmPartialExecutionResult { - VmPartialExecutionResult { - logs: Default::default(), - revert_reason: Default::default(), - contracts_used: Default::default(), - cycles_used: Default::default(), - computational_gas_used: Default::default(), - } -} - /// Creates a `TxExecutionResult` object denoting a successful tx execution. pub(crate) fn successful_exec() -> TxExecutionResult { TxExecutionResult::Success { - tx_result: Box::new(VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - call_traces: vec![], - gas_refunded: 0, - operator_suggested_refund: 0, + tx_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), }), tx_metrics: ExecutionMetricsForCriteria { l1_gas: Default::default(), @@ -234,8 +222,14 @@ pub(crate) fn successful_exec() -> TxExecutionResult { l1_gas: Default::default(), execution_metrics: Default::default(), }, - bootloader_dry_run_result: Box::new(partial_execution_result()), + bootloader_dry_run_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), + }), compressed_bytecodes: vec![], + call_tracer_result: vec![], } } @@ -244,27 +238,32 @@ pub(crate) fn successful_exec_with_metrics( tx_metrics: ExecutionMetricsForCriteria, ) -> TxExecutionResult { TxExecutionResult::Success { - tx_result: Box::new(VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - call_traces: vec![], - gas_refunded: 0, - operator_suggested_refund: 0, + tx_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), }), tx_metrics, bootloader_dry_run_metrics: ExecutionMetricsForCriteria { l1_gas: Default::default(), execution_metrics: Default::default(), }, - bootloader_dry_run_result: Box::new(partial_execution_result()), + bootloader_dry_run_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), + }), compressed_bytecodes: vec![], + call_tracer_result: vec![], } } /// Creates a `TxExecutionResult` object denoting a tx that was rejected. 
pub(crate) fn rejected_exec() -> TxExecutionResult {
TxExecutionResult::RejectedByVm {
- rejection_reason: vm::TxRevertReason::InnerTxError,
+ reason: vm::Halt::InnerTxError,
}
}
@@ -278,29 +277,17 @@ pub(crate) fn bootloader_tip_out_of_gas() -> TxExecutionResult {
pub(crate) fn pending_batch_data(
pending_miniblocks: Vec<MiniblockReexecuteData>,
) -> PendingBatchData {
- let block_properties = default_block_properties();
-
- let context = BlockContext {
- block_number: 1,
- block_timestamp: 1,
- l1_gas_price: 1,
- fair_l2_gas_price: 1,
- operator_address: FEE_ACCOUNT,
- };
- let derived_context = DerivedBlockContext {
- context,
- base_fee: 1,
- };
-
- let params = L1BatchParams {
- context_mode: BlockContextMode::NewBlock(derived_context, Default::default()),
- properties: block_properties,
- base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(),
- protocol_version: ProtocolVersionId::latest(),
- };
-
PendingBatchData {
- params,
+ l1_batch_env: default_l1_batch_env(1, 1, FEE_ACCOUNT),
+ system_env: SystemEnv {
+ zk_porter_available: false,
+ version: ProtocolVersionId::latest(),
+ base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(),
+ gas_limit: BLOCK_GAS_LIMIT,
+ execution_mode: TxExecutionMode::VerifyExecute,
+ default_validation_computational_gas_limit: BLOCK_GAS_LIMIT,
+ chain_id: L2ChainId(270),
+ },
pending_miniblocks,
}
}
@@ -320,7 +307,7 @@
enum ScenarioItem {
),
BatchSeal(
&'static str,
- Option<Box<dyn FnOnce(&VmBlockResult, &UpdatesManager, &BlockContext) + Send>>,
+ Option<Box<dyn FnOnce(&VmExecutionResultAndLogs, &UpdatesManager, &L1BatchEnv) + Send>>,
),
}
@@ -427,7 +414,11 @@ impl TestBatchExecutorBuilder {
#[async_trait]
impl L1BatchExecutorBuilder for TestBatchExecutorBuilder {
- async fn init_batch(&self, _l1batch_params: L1BatchParams) -> BatchExecutorHandle {
+ async fn init_batch(
+ &self,
+ _l1batch_params: L1BatchEnv,
+ _system_env: SystemEnv,
+ ) -> BatchExecutorHandle {
let (commands_sender, commands_receiver) = mpsc::channel(1);
let executor = TestBatchExecutor::new(
@@ -485,6 +476,9 @@ impl TestBatchExecutor {
resp.send(result).unwrap();
self.last_tx = tx.hash();
}
+ Command::StartNextMiniblock(_, resp) => {
+ resp.send(()).unwrap();
+ }
Command::RollbackLastTx(resp) => {
// This is an additional safety check: IO would check that every rollback is included in the
// test scenario, but here we want to additionally check that each such request goes to the
@@ -502,7 +496,7 @@ impl TestBatchExecutor {
}
Command::FinishBatch(resp) => {
// Blanket result, it doesn't really matter.
- resp.send(default_vm_block_result()).unwrap();
+ resp.send((default_vm_block_result(), None)).unwrap();
return;
}
}
}
@@ -589,36 +583,49 @@ impl StateKeeperIO for TestIO {
self.scenario.pending_batch.take()
}
- async fn wait_for_new_batch_params(&mut self, _max_wait: Duration) -> Option<L1BatchParams> {
- let block_properties = default_block_properties();
-
- let previous_block_hash = U256::zero();
- let context = BlockContext {
- block_number: self.batch_number.0,
- block_timestamp: self.timestamp,
- l1_gas_price: self.l1_gas_price,
- fair_l2_gas_price: self.fair_l2_gas_price,
- operator_address: self.fee_account,
- };
- let derived_context = DerivedBlockContext {
- context,
- base_fee: 1,
+ async fn wait_for_new_batch_params(
+ &mut self,
+ _max_wait: Duration,
+ ) -> Option<(SystemEnv, L1BatchEnv)> {
+ let first_miniblock_info = L2BlockEnv {
+ number: self.miniblock_number.0,
+ timestamp: self.timestamp,
+ prev_block_hash: H256::zero(),
+ max_virtual_blocks_to_create: 1,
};
-
- Some(L1BatchParams {
- context_mode: BlockContextMode::NewBlock(derived_context, previous_block_hash),
- properties: block_properties,
- base_system_contracts: BASE_SYSTEM_CONTRACTS.clone(),
- protocol_version: self.protocol_version,
- })
+ Some((
+ SystemEnv {
+ zk_porter_available: false,
+ version: self.protocol_version,
+ base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(),
+ gas_limit: BLOCK_GAS_LIMIT,
+ execution_mode: TxExecutionMode::VerifyExecute,
+ default_validation_computational_gas_limit: BLOCK_GAS_LIMIT,
+ chain_id: L2ChainId(270),
+ },
+ L1BatchEnv {
+ previous_batch_hash: Some(H256::zero()),
+ number: self.batch_number,
+ timestamp: self.timestamp,
+ l1_gas_price: self.l1_gas_price,
+ fair_l2_gas_price: self.fair_l2_gas_price,
+ fee_account: self.fee_account,
+ enforced_base_fee: None,
+ first_l2_block: first_miniblock_info,
+ },
+ ))
}
async fn wait_for_new_miniblock_params(
&mut self,
_max_wait: Duration,
_prev_miniblock_timestamp: u64,
- ) -> Option<u64> {
- Some(self.timestamp)
+ ) -> Option<MiniblockParams> {
+ Some(MiniblockParams {
+ timestamp: self.timestamp,
+ // 1 is just a constant used for tests.
+ virtual_blocks: 1,
+ })
}
async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
@@ -684,16 +691,21 @@
async fn seal_l1_batch(
&mut self,
- block_result: VmBlockResult,
+ _witness_block_state: Option<WitnessBlockState>,
updates_manager: UpdatesManager,
- block_context: DerivedBlockContext,
+ l1_batch_env: &L1BatchEnv,
+ finished_batch: FinishedL1Batch,
) {
let action = self.pop_next_item("seal_l1_batch");
let ScenarioItem::BatchSeal(_, check_fn) = action else {
panic!("Unexpected action: {:?}", action);
};
if let Some(check_fn) = check_fn {
- check_fn(&block_result, &updates_manager, &block_context.context);
+ check_fn(
+ &finished_batch.block_tip_execution_result,
+ &updates_manager,
+ l1_batch_env,
+ );
}
self.miniblock_number += 1; // Seal the fictive miniblock.
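With these changes, `StateKeeperIO::wait_for_new_batch_params` hands the keeper a full `(SystemEnv, L1BatchEnv)` pair instead of the old `L1BatchParams`. A hedged sketch of consuming that pair, using only fields visible in this diff (the `io` value implementing `StateKeeperIO` and the surrounding test setup are assumptions):

```rust
// Hedged sketch: the caller now destructures a (SystemEnv, L1BatchEnv) pair.
// Field names are taken from this diff; the setup around `io` is assumed.
let (system_env, l1_batch_env) = io
    .wait_for_new_batch_params(Duration::from_millis(100))
    .await
    .expect("no batch params within the timeout");
assert_eq!(system_env.chain_id, L2ChainId(270));
assert_eq!(l1_batch_env.first_l2_block.max_virtual_blocks_to_create, 1);
```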
diff --git a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
index 36c113c73b5d..c7ab405d9dab 100644
--- a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
+++ b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
@@ -44,20 +44,21 @@ impl L1BatchUpdates {
#[cfg(test)]
mod tests {
+ use zksync_types::{ProtocolVersionId, H256};
+
use super::*;
use crate::{
gas_tracker::new_block_gas_count,
- state_keeper::{
- extractors,
- tests::{create_execution_result, create_transaction},
- },
+ state_keeper::tests::{create_execution_result, create_transaction},
};
+ use vm::TransactionVmExt;
#[test]
fn apply_miniblock_with_empty_tx() {
- let mut miniblock_accumulator = MiniblockUpdates::new(0);
+ let mut miniblock_accumulator =
+ MiniblockUpdates::new(0, 0, H256::zero(), 1, Some(ProtocolVersionId::latest()));
let tx = create_transaction(10, 100);
- let expected_tx_size = extractors::encoded_transaction_size(tx.clone());
+ let expected_tx_size = tx.bootloader_encoding_size();
miniblock_accumulator.extend_from_executed_transaction(
tx,
@@ -65,6 +66,7 @@ mod tests {
BlockGasCount::default(),
ExecutionMetrics::default(),
vec![],
+ vec![],
);
let mut l1_batch_accumulator = L1BatchUpdates::new();
diff --git a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
index d66d9982b561..a49edbee2257 100644
--- a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
+++ b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
@@ -1,16 +1,17 @@
use std::collections::HashMap;
+use vm::{ExecutionResult, L2BlockEnv, TransactionVmExt, VmExecutionResultAndLogs};
-use vm::vm::VmTxExecutionResult;
use zksync_types::{
- block::BlockGasCount,
+ block::{legacy_miniblock_hash, miniblock_hash, BlockGasCount},
event::extract_bytecodes_marked_as_known,
l2_to_l1_log::L2ToL1Log,
- tx::{tx_execution_info::VmExecutionLogs, ExecutionMetrics, TransactionExecutionResult},
- StorageLogQuery, Transaction, VmEvent, H256,
+ tx::tx_execution_info::TxExecutionStatus,
+ tx::{ExecutionMetrics, TransactionExecutionResult},
+ vm_trace::Call,
+ MiniblockNumber, ProtocolVersionId, StorageLogQuery, Transaction, VmEvent, H256,
};
use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo};
-
-use crate::state_keeper::extractors;
+use zksync_utils::concat_and_hash;
#[derive(Debug, Clone, PartialEq)]
pub struct MiniblockUpdates {
@@ -24,10 +25,21 @@ pub struct MiniblockUpdates {
pub block_execution_metrics: ExecutionMetrics,
pub txs_encoding_size: usize,
pub timestamp: u64,
+ pub number: u32,
+ pub prev_block_hash: H256,
+ pub txs_rolling_hash: H256,
+ pub virtual_blocks: u32,
+ pub protocol_version: Option<ProtocolVersionId>,
}
impl MiniblockUpdates {
- pub(crate) fn new(timestamp: u64) -> Self {
+ pub(crate) fn new(
+ timestamp: u64,
+ number: u32,
+ prev_block_hash: H256,
+ virtual_blocks: u32,
+ protocol_version: Option<ProtocolVersionId>,
+ ) -> Self {
Self {
executed_transactions: vec![],
events: vec![],
@@ -38,26 +50,48 @@ impl MiniblockUpdates {
block_execution_metrics: ExecutionMetrics::default(),
txs_encoding_size: 0,
timestamp,
+ number,
+ prev_block_hash,
+ txs_rolling_hash: H256::zero(),
+ virtual_blocks,
+ protocol_version,
}
}
- pub(crate) fn extend_from_fictive_transaction(&mut self, vm_execution_logs: VmExecutionLogs) {
- self.events.extend(vm_execution_logs.events);
- self.storage_logs.extend(vm_execution_logs.storage_logs);
- self.l2_to_l1_logs.extend(vm_execution_logs.l2_to_l1_logs);
+ pub(crate) fn extend_from_fictive_transaction(&mut self, result: VmExecutionResultAndLogs) {
+ self.events.extend(result.logs.events);
+ self.storage_logs.extend(result.logs.storage_logs);
+ self.l2_to_l1_logs.extend(result.logs.l2_to_l1_logs);
}
pub(crate) fn extend_from_executed_transaction(
&mut self,
tx: Transaction,
- tx_execution_result: VmTxExecutionResult,
+ tx_execution_result: VmExecutionResultAndLogs,
tx_l1_gas_this_tx: BlockGasCount,
execution_metrics: ExecutionMetrics,
compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+ call_traces: Vec<Call>,
) {
- // Get bytecode hashes that were marked as known
let saved_factory_deps =
- extract_bytecodes_marked_as_known(&tx_execution_result.result.logs.events);
+ extract_bytecodes_marked_as_known(&tx_execution_result.logs.events);
+ self.events.extend(tx_execution_result.logs.events);
+ self.l2_to_l1_logs
+ .extend(tx_execution_result.logs.l2_to_l1_logs);
+
+ let gas_refunded = tx_execution_result.refunds.gas_refunded;
+ let operator_suggested_refund = tx_execution_result.refunds.operator_suggested_refund;
+ let execution_status = if tx_execution_result.result.is_failed() {
+ TxExecutionStatus::Failure
+ } else {
+ TxExecutionStatus::Success
+ };
+
+ let revert_reason = match &tx_execution_result.result {
+ ExecutionResult::Success { .. } => None,
+ ExecutionResult::Revert { output } => Some(output.to_string()),
+ ExecutionResult::Halt { reason } => Some(reason.to_string()),
+ };
// Get transaction factory deps
let factory_deps = tx.execute.factory_deps.as_deref().unwrap_or_default();
@@ -79,50 +113,70 @@ impl MiniblockUpdates {
});
self.new_factory_deps.extend(known_bytecodes);
- self.events.extend(tx_execution_result.result.logs.events);
- self.storage_logs
- .extend(tx_execution_result.result.logs.storage_logs);
- self.l2_to_l1_logs
- .extend(tx_execution_result.result.logs.l2_to_l1_logs);
-
self.l1_gas_count += tx_l1_gas_this_tx;
self.block_execution_metrics += execution_metrics;
- self.txs_encoding_size += extractors::encoded_transaction_size(tx.clone());
+ self.txs_encoding_size += tx.bootloader_encoding_size();
+
+ self.storage_logs
+ .extend(tx_execution_result.logs.storage_logs);
+
+ self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx.hash());
self.executed_transactions.push(TransactionExecutionResult {
hash: tx.hash(),
transaction: tx,
execution_info: execution_metrics,
- execution_status: tx_execution_result.status,
- refunded_gas: tx_execution_result.gas_refunded,
- operator_suggested_refund: tx_execution_result.operator_suggested_refund,
+ execution_status,
+ refunded_gas: gas_refunded,
+ operator_suggested_refund,
compressed_bytecodes,
- call_traces: tx_execution_result.call_traces,
- revert_reason: tx_execution_result
- .result
- .revert_reason
- .map(|reason| reason.to_string()),
+ call_traces,
+ revert_reason,
});
}
+
+ /// Calculates miniblock hash based on the protocol version.
+ pub(crate) fn get_miniblock_hash(&self) -> H256 { + match self.protocol_version { + Some(id) if id >= ProtocolVersionId::Version13 => miniblock_hash( + MiniblockNumber(self.number), + self.timestamp, + self.prev_block_hash, + self.txs_rolling_hash, + ), + _ => legacy_miniblock_hash(MiniblockNumber(self.number)), + } + } + + pub(crate) fn get_miniblock_env(&self) -> L2BlockEnv { + L2BlockEnv { + number: self.number, + timestamp: self.timestamp, + prev_block_hash: self.prev_block_hash, + max_virtual_blocks_to_create: self.virtual_blocks, + } + } } #[cfg(test)] mod tests { use super::*; use crate::state_keeper::tests::{create_execution_result, create_transaction}; + use vm::TransactionVmExt; #[test] fn apply_empty_l2_tx() { - let mut accumulator = MiniblockUpdates::new(0); + let mut accumulator = + MiniblockUpdates::new(0, 0, H256::random(), 0, Some(ProtocolVersionId::latest())); let tx = create_transaction(10, 100); - let expected_tx_size = extractors::encoded_transaction_size(tx.clone()); - + let bootloader_encoding_size = tx.bootloader_encoding_size(); accumulator.extend_from_executed_transaction( tx, create_execution_result(0, []), BlockGasCount::default(), ExecutionMetrics::default(), vec![], + vec![], ); assert_eq!(accumulator.executed_transactions.len(), 1); @@ -132,6 +186,6 @@ mod tests { assert_eq!(accumulator.l1_gas_count, Default::default()); assert_eq!(accumulator.new_factory_deps.len(), 0); assert_eq!(accumulator.block_execution_metrics.l2_l1_logs, 0); - assert_eq!(accumulator.txs_encoding_size, expected_tx_size); + assert_eq!(accumulator.txs_encoding_size, bootloader_encoding_size); } } diff --git a/core/bin/zksync_core/src/state_keeper/updates/mod.rs b/core/bin/zksync_core/src/state_keeper/updates/mod.rs index b537e7eea9c1..0837bc6da60d 100644 --- a/core/bin/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/updates/mod.rs @@ -1,12 +1,11 @@ -use std::mem; +use vm::{L1BatchEnv, VmExecutionResultAndLogs}; -use vm::{vm::VmTxExecutionResult, vm_with_bootloader::BlockContextMode}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_types::vm_trace::Call; use zksync_types::{ - block::BlockGasCount, - storage_writes_deduplicator::StorageWritesDeduplicator, - tx::tx_execution_info::{ExecutionMetrics, VmExecutionLogs}, - Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, + block::BlockGasCount, storage_writes_deduplicator::StorageWritesDeduplicator, + tx::tx_execution_info::ExecutionMetrics, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, Transaction, }; use zksync_utils::bytecode::CompressedBytecodeInfo; @@ -15,6 +14,8 @@ pub mod miniblock_updates; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, miniblock_updates::MiniblockUpdates}; +use super::io::MiniblockParams; + /// Most of the information needed to seal the l1 batch/mini-block is contained within the VM, /// things that are not captured there are accumulated externally. /// `MiniblockUpdates` keeps updates for the pending mini-block. 
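The new `get_miniblock_hash` above is the version gate for miniblock hashes: before `Version13` the hash depends only on the block number, while from `Version13` on it commits to the timestamp, the previous block hash, and the rolling hash of the block's transactions (folded one tx at a time via `concat_and_hash`). A standalone sketch of that rule — the helpers come from `zksync_types::block` as shown in this diff, but the free function itself is illustrative, not part of the patch:

```rust
// Illustrative restatement of the version-gated hash rule; the helpers are
// real (see the imports above), the free function is an assumption.
use zksync_types::{
    block::{legacy_miniblock_hash, miniblock_hash},
    MiniblockNumber, ProtocolVersionId, H256,
};

fn hash_for_miniblock(
    number: MiniblockNumber,
    timestamp: u64,
    prev_block_hash: H256,
    txs_rolling_hash: H256,
    protocol_version: Option<ProtocolVersionId>,
) -> H256 {
    match protocol_version {
        // From Version13 on, the hash commits to the block contents.
        Some(v) if v >= ProtocolVersionId::Version13 => {
            miniblock_hash(number, timestamp, prev_block_hash, txs_rolling_hash)
        }
        // Older (or unknown) protocol versions keep the legacy, number-only hash.
        _ => legacy_miniblock_hash(number),
    }
}
```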
@@ -36,21 +37,25 @@ pub struct UpdatesManager
impl UpdatesManager {
pub(crate) fn new(
- block_context: &BlockContextMode,
+ l1_batch_env: L1BatchEnv,
base_system_contract_hashes: BaseSystemContractsHashes,
protocol_version: ProtocolVersionId,
) -> Self {
- let batch_timestamp = block_context.timestamp();
- let context = block_context.inner_block_context().context;
Self {
- batch_timestamp,
- l1_gas_price: context.l1_gas_price,
- fair_l2_gas_price: context.fair_l2_gas_price,
- base_fee_per_gas: block_context.inner_block_context().base_fee,
+ batch_timestamp: l1_batch_env.timestamp,
+ l1_gas_price: l1_batch_env.l1_gas_price,
+ fair_l2_gas_price: l1_batch_env.fair_l2_gas_price,
+ base_fee_per_gas: l1_batch_env.base_fee(),
protocol_version,
base_system_contract_hashes,
l1_batch: L1BatchUpdates::new(),
- miniblock: MiniblockUpdates::new(batch_timestamp),
+ miniblock: MiniblockUpdates::new(
+ l1_batch_env.first_l2_block.timestamp,
+ l1_batch_env.first_l2_block.number,
+ l1_batch_env.first_l2_block.prev_block_hash,
+ l1_batch_env.first_l2_block.max_virtual_blocks_to_create,
+ Some(protocol_version),
+ ),
storage_writes_deduplicator: StorageWritesDeduplicator::new(),
}
}
@@ -86,7 +91,7 @@ impl UpdatesManager {
fair_l2_gas_price: self.fair_l2_gas_price,
base_fee_per_gas: self.base_fee_per_gas,
base_system_contracts_hashes: self.base_system_contract_hashes,
- protocol_version: self.protocol_version,
+ protocol_version: Some(self.protocol_version),
l2_erc20_bridge_addr,
}
}
@@ -98,35 +103,41 @@ impl UpdatesManager {
pub(crate) fn extend_from_executed_transaction(
&mut self,
tx: Transaction,
- tx_execution_result: VmTxExecutionResult,
+ tx_execution_result: VmExecutionResultAndLogs,
compressed_bytecodes: Vec<CompressedBytecodeInfo>,
tx_l1_gas_this_tx: BlockGasCount,
execution_metrics: ExecutionMetrics,
+ call_traces: Vec<Call>,
) {
self.storage_writes_deduplicator
- .apply(&tx_execution_result.result.logs.storage_logs);
+ .apply(&tx_execution_result.logs.storage_logs);
self.miniblock.extend_from_executed_transaction(
tx,
tx_execution_result,
tx_l1_gas_this_tx,
execution_metrics,
compressed_bytecodes,
+ call_traces,
);
}
- pub(crate) fn extend_from_fictive_transaction(&mut self, vm_execution_logs: VmExecutionLogs) {
+ pub(crate) fn extend_from_fictive_transaction(&mut self, result: VmExecutionResultAndLogs) {
self.storage_writes_deduplicator
- .apply(&vm_execution_logs.storage_logs);
- self.miniblock
- .extend_from_fictive_transaction(vm_execution_logs);
+ .apply(&result.logs.storage_logs);
+ self.miniblock.extend_from_fictive_transaction(result);
}
/// Pushes a new miniblock with the specified timestamp into this manager. The previously
/// held miniblock is considered sealed and is used to extend the L1 batch data.
- pub(crate) fn push_miniblock(&mut self, new_miniblock_timestamp: u64) {
- let new_miniblock_updates = MiniblockUpdates::new(new_miniblock_timestamp);
- let old_miniblock_updates = mem::replace(&mut self.miniblock, new_miniblock_updates);
-
+ pub(crate) fn push_miniblock(&mut self, miniblock_params: MiniblockParams) {
+ let new_miniblock_updates = MiniblockUpdates::new(
+ miniblock_params.timestamp,
+ self.miniblock.number + 1,
+ self.miniblock.get_miniblock_hash(),
+ miniblock_params.virtual_blocks,
+ Some(self.protocol_version),
+ );
+ let old_miniblock_updates = std::mem::replace(&mut self.miniblock, new_miniblock_updates);
self.l1_batch
.extend_from_sealed_miniblock(old_miniblock_updates);
}
@@ -159,7 +170,7 @@ pub(crate) struct MiniblockSealCommand {
pub fair_l2_gas_price: u64,
pub base_fee_per_gas: u64,
pub base_system_contracts_hashes: BaseSystemContractsHashes,
- pub protocol_version: ProtocolVersionId,
+ pub protocol_version: Option<ProtocolVersionId>,
pub l2_erc20_bridge_addr: Address,
}
@@ -187,6 +198,7 @@ mod tests {
vec![],
new_block_gas_count(),
ExecutionMetrics::default(),
+ vec![],
);
// Check that only pending state is updated.
@@ -195,7 +207,10 @@ mod tests {
assert_eq!(updates_manager.l1_batch.executed_transactions.len(), 0);
// Seal miniblock.
- updates_manager.push_miniblock(2);
+ updates_manager.push_miniblock(MiniblockParams {
+ timestamp: 2,
+ virtual_blocks: 1,
+ });
// Check that L1 batch updates are the same with the pending state
// and miniblock updates are empty.
diff --git a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
index c6488d98daae..d4a90fa95bb5 100644
--- a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
+++ b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
@@ -92,17 +92,17 @@ impl BatchStatusUpdater {
}
}
- pub async fn run(mut self, stop_receiver: Receiver<bool>) {
+ pub async fn run(mut self, stop_receiver: Receiver<bool>) -> anyhow::Result<()> {
loop {
if *stop_receiver.borrow() {
- vlog::info!("Stop signal received, exiting the batch status updater routine");
- return;
+ tracing::info!("Stop signal received, exiting the batch status updater routine");
+ return Ok(());
}
// Status changes are created externally, so that even if we will receive a network error
// while requesting the changes, we will be able to process what we already fetched.
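Like the state keeper and the mempool fetcher earlier in this diff, the batch status updater's `run` now returns `anyhow::Result<()>` so the spawning code can surface task exits instead of panicking. A minimal sketch of the cancellable-loop shape these tasks converge on, assuming the types shown in this diff (the actual work in the body is elided):

```rust
// Minimal sketch of the shared cancellable-loop shape; `Receiver` here is
// tokio's watch::Receiver<bool>, and the per-iteration work is elided.
pub async fn run(mut self, stop_receiver: Receiver<bool>) -> anyhow::Result<()> {
    loop {
        if *stop_receiver.borrow() {
            tracing::info!("Stop signal received, shutting down");
            return Ok(());
        }
        // ... fetch pending status changes and apply them to the database ...
    }
}
```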
let mut status_changes = StatusChanges::new(); if let Err(err) = self.get_status_changes(&mut status_changes).await { - vlog::warn!("Failed to get status changes from the database: {err}"); + tracing::warn!("Failed to get status changes from the database: {err}"); }; if status_changes.is_empty() { @@ -228,7 +228,7 @@ impl BatchStatusUpdater { l1_tx_hash: batch_info.base.commit_tx_hash.unwrap(), happened_at: batch_info.base.committed_at.unwrap(), }); - vlog::info!("Batch {}: committed", batch_info.l1_batch_number); + tracing::info!("Batch {}: committed", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "committed"); *last_committed_l1_batch += 1; } @@ -251,7 +251,7 @@ impl BatchStatusUpdater { l1_tx_hash: batch_info.base.prove_tx_hash.unwrap(), happened_at: batch_info.base.proven_at.unwrap(), }); - vlog::info!("Batch {}: proven", batch_info.l1_batch_number); + tracing::info!("Batch {}: proven", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "proven"); *last_proven_l1_batch += 1; } @@ -274,7 +274,7 @@ impl BatchStatusUpdater { l1_tx_hash: batch_info.base.execute_tx_hash.unwrap(), happened_at: batch_info.base.executed_at.unwrap(), }); - vlog::info!("Batch {}: executed", batch_info.l1_batch_number); + tracing::info!("Batch {}: executed", batch_info.l1_batch_number); metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "executed"); *last_executed_l1_batch += 1; } @@ -293,7 +293,7 @@ impl BatchStatusUpdater { let mut storage = self.pool.access_storage_tagged("sync_layer").await; for change in changes.commit.into_iter() { - vlog::info!( + tracing::info!( "Commit status change: number {}, hash {}, happened at {}", change.number, change.l1_tx_hash, @@ -311,7 +311,7 @@ impl BatchStatusUpdater { self.last_committed_l1_batch = change.number; } for change in changes.prove.into_iter() { - vlog::info!( + tracing::info!( "Prove status change: number {}, hash {}, happened at {}", change.number, change.l1_tx_hash, @@ -329,7 +329,7 @@ impl BatchStatusUpdater { self.last_proven_l1_batch = change.number; } for change in changes.execute.into_iter() { - vlog::info!( + tracing::info!( "Execute status change: number {}, hash {}, happened at {}", change.number, change.l1_tx_hash, diff --git a/core/bin/zksync_core/src/sync_layer/external_io.rs b/core/bin/zksync_core/src/sync_layer/external_io.rs index 01c04db706d3..1a64d57efabd 100644 --- a/core/bin/zksync_core/src/sync_layer/external_io.rs +++ b/core/bin/zksync_core/src/sync_layer/external_io.rs @@ -1,13 +1,21 @@ -use std::{collections::HashMap, convert::TryFrom, iter::FromIterator, time::Duration}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + iter::FromIterator, + time::Duration, +}; -use super::genesis::fetch_system_contract_by_hash; use actix_rt::time::Instant; use async_trait::async_trait; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; + +use vm::{FinishedL1Batch, L1BatchEnv, SystemEnv}; +use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; +use zksync_types::block::legacy_miniblock_hash; +use zksync_types::witness_block_state::WitnessBlockState; use zksync_types::{ ethabi::Address, l1::L1Tx, l2::L2Tx, protocol_version::ProtocolUpgradeTx, L1BatchNumber, - L1BlockNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, + L1BlockNumber, 
L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256,
 };
 use zksync_utils::{be_words_to_bytes, bytes_to_be_words};
 @@ -15,13 +23,17 @@ use crate::state_keeper::{
 extractors,
 io::{
 common::{l1_batch_params, load_pending_batch, poll_iters},
- L1BatchParams, PendingBatchData, StateKeeperIO,
+ MiniblockParams, PendingBatchData, StateKeeperIO,
 },
 seal_criteria::SealerFn,
 updates::UpdatesManager,
 };
 use super::{
+ genesis::{
+ fetch_protocol_version, fetch_sync_block_without_transactions,
+ fetch_system_contract_by_hash,
+ },
 sync_action::{ActionQueue, SyncAction},
 SyncState,
 };
 @@ -46,15 +58,18 @@ impl ExternalNodeSealer {
 fn should_seal_miniblock(&self) -> bool {
 let res = matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock));
 if res {
- vlog::info!("Sealing miniblock");
+ tracing::info!("Sealing miniblock");
 }
 res
 }
 fn should_seal_batch(&self) -> bool {
- let res = matches!(self.actions.peek_action(), Some(SyncAction::SealBatch));
+ let res = matches!(
+ self.actions.peek_action(),
+ Some(SyncAction::SealBatch { .. })
+ );
 if res {
- vlog::info!("Sealing the batch");
+ tracing::info!("Sealing the batch");
 }
 res
 }
 @@ -86,6 +101,9 @@ pub struct ExternalIO {
 /// Required to extract newly added tokens.
 l2_erc20_bridge_addr: Address,
+ // TODO: required for the system env; we should probably get rid of fetching the system env here.
+ validation_computational_gas_limit: u32,
+ chain_id: L2ChainId,
 }
 impl ExternalIO {
 @@ -95,13 +113,15 @@ impl ExternalIO {
 sync_state: SyncState,
 main_node_url: String,
 l2_erc20_bridge_addr: Address,
+ validation_computational_gas_limit: u32,
+ chain_id: L2ChainId,
 ) -> Self {
 let mut storage = pool.access_storage_tagged("sync_layer").await;
 let last_sealed_l1_batch_header = storage.blocks_dal().get_newest_l1_batch_header().await;
 let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await;
 drop(storage);
- vlog::info!(
+ tracing::info!(
 "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}",
 last_sealed_l1_batch_header.number + 1,
 last_miniblock_number + 1,
 @@ -117,9 +137,62 @@ impl ExternalIO {
 sync_state,
 main_node_url,
 l2_erc20_bridge_addr,
+ validation_computational_gas_limit,
+ chain_id,
 }
 }
+ pub async fn recalculate_miniblock_hashes(&self) {
+ let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+ let last_blocks: Vec<_> = storage
+ .blocks_dal()
+ .get_last_miniblocks_for_version(5, ProtocolVersionId::Version12)
+ .await;
+
+ // If all of the last miniblocks are good, we have already applied this migration.
+ if last_blocks
+ .into_iter()
+ .all(|(number, hash)| legacy_miniblock_hash(number) == hash)
+ {
+ return;
+ }
+
+ // August 29, 2023
+ let timestamp = 1693267200;
+ let mut miniblock_and_hashes = storage
+ .blocks_dal()
+ .get_miniblock_hashes_from_date(timestamp, 1000, ProtocolVersionId::Version12)
+ .await;
+
+ let mut updated_hashes = vec![];
+
+ let mut last_miniblock_number = 0;
+ while !miniblock_and_hashes.is_empty() {
+ for (number, hash) in miniblock_and_hashes {
+ if hash != legacy_miniblock_hash(number) {
+ updated_hashes.push((number, legacy_miniblock_hash(number)))
+ }
+ last_miniblock_number = number.0;
+ }
+ if !updated_hashes.is_empty() {
+ storage.blocks_dal().update_hashes(&updated_hashes).await;
+ updated_hashes = vec![];
+ }
+
+ miniblock_and_hashes = storage
+ .blocks_dal()
+ .get_miniblocks_since_block(
+ last_miniblock_number as i64 + 1,
+ 1000,
+ ProtocolVersionId::Version12,
+ )
+ .await;
+ tracing::info!("Last updated miniblock {}", 
last_miniblock_number);
+ }
+
+ tracing::info!("Finished the hash recalculation")
+ }
+ async fn load_previous_l1_batch_hash(&self) -> U256 {
 let mut storage = self.pool.access_storage_tagged("sync_layer").await;
 @@ -134,6 +207,55 @@ impl ExternalIO {
 hash
 }
+ async fn load_base_system_contracts_by_version_id(
+ &self,
+ id: ProtocolVersionId,
+ ) -> BaseSystemContracts {
+ let base_system_contracts = self
+ .pool
+ .access_storage_tagged("sync_layer")
+ .await
+ .protocol_versions_dal()
+ .load_base_system_contracts_by_version_id(id as u16)
+ .await;
+
+ match base_system_contracts {
+ Some(version) => version,
+ None => {
+ let protocol_version = fetch_protocol_version(&self.main_node_url, id)
+ .await
+ .expect("Failed to fetch protocol version from the main node");
+ self.pool
+ .access_storage_tagged("sync_layer")
+ .await
+ .protocol_versions_dal()
+ .save_protocol_version(
+ protocol_version.version_id.try_into().unwrap(),
+ protocol_version.timestamp,
+ protocol_version.verification_keys_hashes,
+ protocol_version.base_system_contracts,
+ // The verifier is not used on the external node, so we can pass a default value.
+ Default::default(),
+ protocol_version.l2_system_upgrade_tx_hash,
+ )
+ .await;
+
+ let bootloader = self
+ .get_base_system_contract(protocol_version.base_system_contracts.bootloader)
+ .await;
+
+ let default_aa = self
+ .get_base_system_contract(protocol_version.base_system_contracts.default_aa)
+ .await;
+
+ BaseSystemContracts {
+ bootloader,
+ default_aa,
+ }
+ }
+ }
+ }
+ async fn get_base_system_contract(&self, hash: H256) -> SystemContractCode {
 let bytecode = self
 .pool
 @@ -150,7 +272,7 @@ impl ExternalIO {
 },
 None => {
 let main_node_url = self.main_node_url.clone();
- vlog::info!("Fetching base system contract bytecode from the main node");
+ tracing::info!("Fetching base system contract bytecode from the main node");
 let contract = fetch_system_contract_by_hash(&main_node_url, hash)
 .await
 .expect("Failed to fetch base system contract bytecode from the main node");
 @@ -182,6 +304,7 @@ impl StateKeeperIO for ExternalIO {
 async fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
 let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+ // TODO (BFT-99): Do not assume that the fee account is the same as in the previous batch.
 let fee_account = storage
 .blocks_dal()
 .get_l1_batch_header(self.current_l1_batch_number - 1)
 .await
 .unwrap_or_else(|| {
 panic!(
 )
 })
 .fee_account_address;
- load_pending_batch(&mut storage, self.current_l1_batch_number, fee_account).await
+ let pending_miniblock_number = {
+ let (_, last_miniblock_number_included_in_l1_batch) = storage
+ .blocks_dal()
+ .get_miniblock_range_of_l1_batch(self.current_l1_batch_number - 1)
+ .await
+ .unwrap();
+ last_miniblock_number_included_in_l1_batch + 1
+ };
+ let pending_miniblock_header = storage
+ .blocks_dal()
+ .get_miniblock_header(pending_miniblock_number)
+ .await?;
+
+ if pending_miniblock_header.protocol_version.is_none() {
+ // Fetch protocol version ID for pending miniblocks to know which VM to use to reexecute them.
+ let sync_block = fetch_sync_block_without_transactions(
+ &self.main_node_url,
+ pending_miniblock_header.number,
+ )
+ .await
+ .expect("Failed to fetch block from the main node")
+ .expect("Block must exist");
+ // Loading base system contracts will insert the protocol version into the database if it's not already present there. 
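+ // In other words, the call below is made purely for this side effect: the returned
+ // `BaseSystemContracts` value is intentionally discarded.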
+ let _ = self + .load_base_system_contracts_by_version_id(sync_block.protocol_version) + .await; + storage + .blocks_dal() + .set_protocol_version_for_pending_miniblocks(sync_block.protocol_version) + .await; + } + + load_pending_batch( + &mut storage, + self.current_l1_batch_number, + fee_account, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await } - async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option { - vlog::debug!("Waiting for the new batch params"); + async fn wait_for_new_batch_params( + &mut self, + max_wait: Duration, + ) -> Option<(SystemEnv, L1BatchEnv)> { + tracing::debug!("Waiting for the new batch params"); for _ in 0..poll_iters(POLL_INTERVAL, max_wait) { match self.actions.pop_action() { Some(SyncAction::OpenBatch { @@ -205,27 +370,22 @@ impl StateKeeperIO for ExternalIO { timestamp, l1_gas_price, l2_fair_gas_price, - base_system_contracts_hashes: - BaseSystemContractsHashes { - bootloader, - default_aa, - }, operator_address, protocol_version, + first_miniblock_info: (miniblock_number, virtual_blocks), + prev_miniblock_hash, }) => { assert_eq!( number, self.current_l1_batch_number, "Batch number mismatch" ); - vlog::info!("Getting previous L1 batch hash"); + tracing::info!("Getting previous L1 batch hash"); let previous_l1_batch_hash = self.load_previous_l1_batch_hash().await; - vlog::info!("Previous L1 batch hash: {previous_l1_batch_hash}"); - - let base_system_contracts = BaseSystemContracts { - bootloader: self.get_base_system_contract(bootloader).await, - default_aa: self.get_base_system_contract(default_aa).await, - }; + tracing::info!("Previous L1 batch hash: {previous_l1_batch_hash}"); + let base_system_contracts = self + .load_base_system_contracts_by_version_id(protocol_version) + .await; return Some(l1_batch_params( number, operator_address, @@ -233,8 +393,13 @@ impl StateKeeperIO for ExternalIO { previous_l1_batch_hash, l1_gas_price, l2_fair_gas_price, + miniblock_number, + prev_miniblock_hash, base_system_contracts, - protocol_version.unwrap_or_default(), + self.validation_computational_gas_limit, + protocol_version, + virtual_blocks, + self.chain_id, )); } Some(other) => { @@ -252,25 +417,35 @@ impl StateKeeperIO for ExternalIO { &mut self, max_wait: Duration, _prev_miniblock_timestamp: u64, - ) -> Option { + ) -> Option { // Wait for the next miniblock to appear in the queue. let actions = &self.actions; for _ in 0..poll_iters(POLL_INTERVAL, max_wait) { match actions.peek_action() { - Some(SyncAction::Miniblock { number, timestamp }) => { + Some(SyncAction::Miniblock { + number, + timestamp, + virtual_blocks, + }) => { self.actions.pop_action(); // We found the miniblock, remove it from the queue. assert_eq!( number, self.current_miniblock_number, "Miniblock number mismatch" ); - return Some(timestamp); + return Some(MiniblockParams { + timestamp, + virtual_blocks, + }); } - Some(SyncAction::SealBatch) => { + Some(SyncAction::SealBatch { virtual_blocks }) => { // We've reached the next batch, so this situation would be handled by the batch sealer. // No need to pop the action from the queue. // It also doesn't matter which timestamp we return, since there will be no more miniblocks in this // batch. We return 0 to make it easy to detect if it ever appears somewhere. 
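+ // For reference, the polling loops above make `poll_iters(POLL_INTERVAL, max_wait)` attempts.
+ // A plausible sketch of that helper (its real body lives in `io::common` and is not part of
+ // this diff; the exact rounding behavior here is an assumption):
+ //
+ // fn poll_iters(poll_interval: Duration, max_wait: Duration) -> usize {
+ //     // Always make at least one attempt, even if `max_wait < poll_interval`.
+ //     (max_wait.as_millis() / poll_interval.as_millis()).max(1) as usize
+ // }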
- return Some(0); + return Some(MiniblockParams { + timestamp: 0, + virtual_blocks, + }); } Some(other) => { panic!( @@ -289,7 +464,7 @@ impl StateKeeperIO for ExternalIO { async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option { let actions = &self.actions; - vlog::debug!( + tracing::debug!( "Waiting for the new tx, next action is {:?}", actions.peek_action() ); @@ -356,6 +531,13 @@ impl StateKeeperIO for ExternalIO { .transactions_dal() .insert_transaction_l2(l2_tx, Default::default()) .await; + } else if let Ok(protocol_system_upgrade_tx) = + ProtocolUpgradeTx::try_from(tx.transaction.clone()) + { + transaction + .transactions_dal() + .insert_system_transaction(protocol_system_upgrade_tx) + .await; } else { unreachable!("Transaction {:?} is neither L1 nor L2", tx.transaction); } @@ -378,17 +560,19 @@ impl StateKeeperIO for ExternalIO { self.sync_state .set_local_block(self.current_miniblock_number); self.current_miniblock_number += 1; - vlog::info!("Miniblock {} is sealed", self.current_miniblock_number); + tracing::info!("Miniblock {} is sealed", self.current_miniblock_number); } async fn seal_l1_batch( &mut self, - block_result: vm::VmBlockResult, + // needed as part of the interface, to be removed once we transition to Merkle Paths + _witness_block_state: Option, updates_manager: UpdatesManager, - block_context: vm::vm_with_bootloader::DerivedBlockContext, + l1_batch_env: &L1BatchEnv, + finished_batch: FinishedL1Batch, ) { match self.actions.pop_action() { - Some(SyncAction::SealBatch) => {} + Some(SyncAction::SealBatch { .. }) => {} other => panic!( "State keeper requested to seal the batch, but the next action is {:?}", other @@ -400,14 +584,13 @@ impl StateKeeperIO for ExternalIO { .seal_l1_batch( &mut storage, self.current_miniblock_number, - self.current_l1_batch_number, - block_result, - block_context, + l1_batch_env, + finished_batch, self.l2_erc20_bridge_addr, ) .await; - vlog::info!("Batch {} is sealed", self.current_l1_batch_number); + tracing::info!("Batch {} is sealed", self.current_l1_batch_number); // Mimic the metric emitted by the main node to reuse existing grafana charts. metrics::gauge!( diff --git a/core/bin/zksync_core/src/sync_layer/fetcher.rs b/core/bin/zksync_core/src/sync_layer/fetcher.rs index f55201409655..728f361708f4 100644 --- a/core/bin/zksync_core/src/sync_layer/fetcher.rs +++ b/core/bin/zksync_core/src/sync_layer/fetcher.rs @@ -4,7 +4,7 @@ use tokio::sync::watch::Receiver; use crate::sync_layer::sync_action::{ActionQueue, SyncAction}; use zksync_dal::ConnectionPool; -use zksync_types::{L1BatchNumber, MiniblockNumber}; +use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_web3_decl::jsonrpsee::core::Error as RpcError; use zksync_web3_decl::RpcResult; @@ -19,7 +19,6 @@ pub struct MainNodeFetcher { client: CachedMainNodeClient, current_l1_batch: L1BatchNumber, current_miniblock: MiniblockNumber, - actions: ActionQueue, sync_state: SyncState, stop_receiver: Receiver, @@ -58,15 +57,14 @@ impl MainNodeFetcher { client, current_l1_batch, current_miniblock, - actions, sync_state, stop_receiver, } } - pub async fn run(mut self) { - vlog::info!( + pub async fn run(mut self) -> anyhow::Result<()> { + tracing::info!( "Starting the fetcher routine. 
Initial miniblock: {}, initial l1 batch: {}", self.current_miniblock, self.current_l1_batch @@ -75,16 +73,16 @@ impl MainNodeFetcher { loop { match self.run_inner().await { Ok(()) => { - vlog::info!("Stop signal received, exiting the fetcher routine"); - return; + tracing::info!("Stop signal received, exiting the fetcher routine"); + return Ok(()); } Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { - vlog::warn!("Following transport error occurred: {}", err); - vlog::info!("Trying again after a delay"); - tokio::time::sleep(RETRY_DELAY_INTERVAL).await; + tracing::warn!("Following transport error occurred: {}", err); + tracing::info!("Trying again after a delay"); + tokio::time::sleep(RETRY_DELAY_INTERVAL).await; // TODO (BFT-100): Implement the fibonacci backoff. } Err(err) => { - panic!("Unexpected error in the fetcher: {}", err); + anyhow::bail!("Unexpected error in the fetcher: {}", err); } } } @@ -121,7 +119,7 @@ impl MainNodeFetcher { } else { "Local action queue is full, waiting for state keeper to process the queue" }; - vlog::debug!("{log_message}"); + tracing::debug!("{log_message}"); tokio::time::sleep(DELAY_INTERVAL).await; } } @@ -136,6 +134,14 @@ impl MainNodeFetcher { let Some(block) = self.client.sync_l2_block(self.current_miniblock).await? else { return Ok(false); }; + + // This will be fetched from cache. + let prev_block = self + .client + .sync_l2_block(self.current_miniblock - 1) + .await? + .expect("Previous block must exist"); + metrics::histogram!( "external_node.fetcher.requests", request_start.elapsed(), @@ -151,7 +157,7 @@ impl MainNodeFetcher { "Unexpected batch number in the next received miniblock" ); - vlog::info!( + tracing::info!( "New batch: {}. Timestamp: {}", block.l1_batch_number, block.timestamp @@ -162,9 +168,12 @@ impl MainNodeFetcher { timestamp: block.timestamp, l1_gas_price: block.l1_gas_price, l2_fair_gas_price: block.l2_fair_gas_price, - base_system_contracts_hashes: block.base_system_contracts_hashes, operator_address: block.operator_address, - protocol_version: None, + protocol_version: block.protocol_version, + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. + first_miniblock_info: (block.number, block.virtual_blocks.unwrap_or(0)), + // Same for `prev_block.hash` as above. + prev_miniblock_hash: prev_block.hash.unwrap_or_else(H256::zero), }); metrics::gauge!("external_node.fetcher.l1_batch", block.l1_batch_number.0 as f64, "status" => "open"); self.current_l1_batch += 1; @@ -174,6 +183,8 @@ impl MainNodeFetcher { new_actions.push(SyncAction::Miniblock { number: block.number, timestamp: block.timestamp, + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. + virtual_blocks: block.virtual_blocks.unwrap_or(0), }); metrics::gauge!("external_node.fetcher.miniblock", block.number.0 as f64); } @@ -191,17 +202,22 @@ impl MainNodeFetcher { // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. if block.last_in_batch { - new_actions.push(SyncAction::SealBatch); + new_actions.push(SyncAction::SealBatch { + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. 
+ virtual_blocks: block.virtual_blocks.unwrap_or(0), + }); } else { new_actions.push(SyncAction::SealMiniblock); } - vlog::info!( + tracing::info!( "New miniblock: {} / {}", block.number, self.sync_state.get_main_node_block().max(block.number) ); - self.client.forget_miniblock(self.current_miniblock); + // Forgetting only the previous one because we still need the current one in cache for the next iteration. + self.client + .forget_miniblock(MiniblockNumber(self.current_miniblock.0.saturating_sub(1))); self.current_miniblock += 1; self.actions.push_actions(new_actions); diff --git a/core/bin/zksync_core/src/sync_layer/genesis.rs b/core/bin/zksync_core/src/sync_layer/genesis.rs index 6843d12757ee..74a9f42d017e 100644 --- a/core/bin/zksync_core/src/sync_layer/genesis.rs +++ b/core/bin/zksync_core/src/sync_layer/genesis.rs @@ -6,7 +6,7 @@ use zksync_dal::StorageProcessor; use zksync_types::{ api, block::DeployedContract, get_code_key, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, AccountTreeId, Address, L1BatchNumber, L2ChainId, - MiniblockNumber, ACCOUNT_CODE_STORAGE_ADDRESS, H256, U64, + MiniblockNumber, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS, H256, U64, }; use zksync_utils::h256_to_u256; use zksync_web3_decl::{ @@ -24,15 +24,15 @@ pub async fn perform_genesis_if_needed( // make the node startup slower. let genesis_block_hash = if transaction.blocks_dal().is_genesis_needed().await { let genesis_params = create_genesis_params(&main_node_url).await?; - let genesis_block_hash = - ensure_genesis_state(&mut transaction, zksync_chain_id, &genesis_params).await; - genesis_block_hash + ensure_genesis_state(&mut transaction, zksync_chain_id, &genesis_params) + .await + .context("ensure_genesis_state")? } else { transaction .blocks_dal() .get_l1_batch_state_root(L1BatchNumber(0)) .await - .expect("genesis block hash is empty") + .context("genesis block hash is empty")? }; validate_genesis_state(&main_node_url, genesis_block_hash).await; @@ -95,7 +95,7 @@ async fn create_genesis_params(main_node_url: &str) -> anyhow::Result anyhow::Result Result { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + + Ok(client + .get_protocol_version(Some(protocol_version as u16)) + .await? + .expect("Protocol version must exist")) +} + +pub async fn fetch_sync_block_without_transactions( + main_node_url: &str, + miniblock_number: MiniblockNumber, +) -> Result, Error> { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); + client.sync_l2_block(miniblock_number, false).await +} diff --git a/core/bin/zksync_core/src/sync_layer/sync_action.rs b/core/bin/zksync_core/src/sync_layer/sync_action.rs index 24ccecc7bdfe..ebceae791828 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_action.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_action.rs @@ -4,11 +4,13 @@ use std::{ time::Instant, }; -use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256}; /// Action queue is used to communicate between the fetcher and the rest of the external node /// by collecting the fetched data in memory until it gets processed by the different entities. +/// +/// TODO (BFT-82): This structure right now expects no more than a single consumer. Using `peek/pop` pairs in +/// two different threads may lead to a race condition. 
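+ /// A minimal sketch of that hazard, with a hypothetical second consumer (the queue is
+ /// designed for exactly one):
+ ///
+ /// ```text
+ /// let next = queue.peek_action(); // consumer A observes `SealBatch { .. }`
+ /// queue.pop_action();             // consumer B pops it concurrently
+ /// // consumer A now acts on an action that is no longer at the head of the queue
+ /// ```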
#[derive(Debug, Clone, Default)] pub struct ActionQueue { inner: Arc>, @@ -35,7 +37,7 @@ impl ActionQueue { /// Returns true if the queue has capacity for a new action. /// Capacity is limited to avoid memory exhaustion. pub(crate) fn has_action_capacity(&self) -> bool { - const ACTION_CAPACITY: usize = 32_768; + const ACTION_CAPACITY: usize = 32_768; // TODO: Make it configurable. // Since the capacity is read before the action is pushed, // it is possible that the capacity will be exceeded, since the fetcher will @@ -86,7 +88,7 @@ impl ActionQueue { return Err(format!("Unexpected Tx: {:?}", actions)); } } - SyncAction::SealMiniblock | SyncAction::SealBatch => { + SyncAction::SealMiniblock | SyncAction::SealBatch { .. } => { if !opened || miniblock_sealed { return Err(format!("Unexpected SealMiniblock/SealBatch: {:?}", actions)); } @@ -128,13 +130,16 @@ pub(crate) enum SyncAction { timestamp: u64, l1_gas_price: u64, l2_fair_gas_price: u64, - base_system_contracts_hashes: BaseSystemContractsHashes, operator_address: Address, - protocol_version: Option, + protocol_version: ProtocolVersionId, + // Miniblock number and virtual blocks count. + first_miniblock_info: (MiniblockNumber, u32), + prev_miniblock_hash: H256, }, Miniblock { number: MiniblockNumber, timestamp: u64, + virtual_blocks: u32, }, Tx(Box), /// We need an explicit action for the miniblock sealing, since we fetch the whole miniblocks and already know @@ -143,7 +148,10 @@ pub(crate) enum SyncAction { /// the next one is sealed on the main node. SealMiniblock, /// Similarly to `SealMiniblock` we must be able to seal the batch even if there is no next miniblock yet. - SealBatch, + SealBatch { + // Virtual blocks count for the fictive miniblock. + virtual_blocks: u32, + }, } impl From for SyncAction { @@ -164,9 +172,10 @@ mod tests { timestamp: 1, l1_gas_price: 1, l2_fair_gas_price: 1, - base_system_contracts_hashes: BaseSystemContractsHashes::default(), operator_address: Default::default(), - protocol_version: Some(ProtocolVersionId::latest()), + protocol_version: ProtocolVersionId::latest(), + first_miniblock_info: (1.into(), 1), + prev_miniblock_hash: H256::default(), } } @@ -174,6 +183,7 @@ mod tests { SyncAction::Miniblock { number: 1.into(), timestamp: 1, + virtual_blocks: 1, } } @@ -198,7 +208,7 @@ mod tests { } fn seal_batch() -> SyncAction { - SyncAction::SealBatch + SyncAction::SealBatch { virtual_blocks: 1 } } #[test] diff --git a/core/bin/zksync_core/src/sync_layer/sync_state.rs b/core/bin/zksync_core/src/sync_layer/sync_state.rs index 83027a446723..3705f3dd5ed9 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_state.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_state.rs @@ -38,9 +38,11 @@ impl SyncState { let mut inner = self.inner.write().unwrap(); if let Some(local_block) = inner.local_block { if block.0 < local_block.0 { - panic!( + // Probably it's fine -- will be checked by the reorg detector. + tracing::warn!( "main_node_block({}) is less than local_block({})", - block, local_block + block, + local_block ); } } @@ -53,7 +55,7 @@ impl SyncState { if let Some(main_node_block) = inner.main_node_block { if block.0 > main_node_block.0 { // Probably it's fine -- will be checked by the reorg detector. 
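+ // For intuition, the two watermarks are typically compared along these lines (a sketch only;
+ // the actual `is_synced` body is not shown in this diff, and `SYNC_MINIBLOCK_DELTA` is a
+ // hypothetical threshold name):
+ //
+ // fn is_synced(&self) -> bool {
+ //     let inner = self.inner.read().unwrap();
+ //     match (inner.main_node_block, inner.local_block) {
+ //         (Some(main), Some(local)) => main.0.saturating_sub(local.0) <= SYNC_MINIBLOCK_DELTA,
+ //         _ => false,
+ //     }
+ // }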
- vlog::info!( + tracing::warn!( "local_block({}) is greater than main_node_block({})", block, main_node_block @@ -140,11 +142,14 @@ mod tests { } #[test] - #[should_panic(expected = "main_node_block(1) is less than local_block(2)")] - fn test_sync_state_panic_on_main_node_block() { + fn test_sync_state_doesnt_panic_on_main_node_block() { let sync_state = SyncState::new(); sync_state.set_local_block(MiniblockNumber(2)); sync_state.set_main_node_block(MiniblockNumber(1)); + // ^ should not panic, as we defer the situation to the reorg detector. + + // At the same time, we should consider ourselves synced unless `ReorgDetector` tells us otherwise. + assert!(sync_state.is_synced()); } } diff --git a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs index d2a814fa48db..d96f3a30e2e1 100644 --- a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs +++ b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs @@ -1,6 +1,7 @@ use std::collections::hash_map::DefaultHasher; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; + use std::sync::Arc; use std::time::Instant; @@ -8,19 +9,18 @@ use async_trait::async_trait; use rand::Rng; use serde::{Deserialize, Serialize}; -use vm::zk_evm::ethereum_types::H256; -use vm::HistoryDisabled; -use vm::{memory::SimpleMemory, StorageOracle, MAX_CYCLES_FOR_TX}; +use vm::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, SimpleMemory, StorageOracle}; +use zksync_config::configs::witness_generator::BasicWitnessGeneratorDataSource; use zksync_config::configs::WitnessGeneratorConfig; use zksync_config::constants::BOOTLOADER_ADDRESS; use zksync_dal::ConnectionPool; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; use zksync_queued_job_processor::JobProcessor; -use zksync_state::{PostgresStorage, StorageView}; -use zksync_types::zkevm_test_harness::toolset::GeometryConfig; +use zksync_state::{PostgresStorage, ReadStorage, ShadowStorage, StorageView, WitnessStorage}; use zksync_types::{ circuit::GEOMETRY_CONFIG, proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, + zkevm_test_harness::toolset::GeometryConfig, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, @@ -28,7 +28,7 @@ use zksync_types::{ witness::oracle::VmWitnessOracle, SchedulerCircuitInstanceWitness, }, - Address, L1BatchNumber, ProtocolVersionId, U256, + Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; @@ -100,7 +100,7 @@ impl BasicWitnessGenerator { // In this case job should be skipped. if threshold > blocks_proving_percentage { metrics::counter!("server.witness_generator.skipped_blocks", 1); - vlog::info!( + tracing::info!( "Skipping witness generation for block {}, blocks_proving_percentage: {}", block_number.0, blocks_proving_percentage @@ -120,7 +120,7 @@ impl BasicWitnessGenerator { } metrics::counter!("server.witness_generator.sampled_blocks", 1); - vlog::info!( + tracing::info!( "Starting witness generation of type {:?} for block {}", AggregationRound::BasicCircuits, block_number.0 @@ -242,7 +242,7 @@ pub async fn process_basic_circuits_job( generate_witness(object_store, config, connection_pool, witness_gen_input).await; let circuits = basic_circuits.clone().into_flattened_set(); - vlog::info!( + tracing::info!( "Witness generation for block {} is complete in {:?}. 
Number of circuits: {}",
 block_number.0,
 started_at.elapsed(),
 @@ -451,21 +451,58 @@ pub async fn generate_witness(
 // The following part is CPU-heavy, so we move it to a separate thread.
 tokio::task::spawn_blocking(move || {
- let connection = rt_handle.block_on(connection_pool.access_storage());
- let storage =
- PostgresStorage::new(rt_handle.clone(), connection, last_miniblock_number, true);
+ // NOTE: this `match` will be moved higher up, as we need to load EVERYTHING from Blob, not just storage
+ // Until we can derive Storage from Merkle Paths, we'll have this version as a testing ground.
+ let storage: Box<dyn ReadStorage> = match config.data_source {
+ BasicWitnessGeneratorDataSource::FromPostgres => {
+ let connection = rt_handle.block_on(connection_pool.access_storage());
+ Box::new(PostgresStorage::new(
+ rt_handle.clone(),
+ connection,
+ last_miniblock_number,
+ true,
+ ))
+ }
+ BasicWitnessGeneratorDataSource::FromPostgresShadowBlob => {
+ let connection = rt_handle.block_on(connection_pool.access_storage());
+ let source_storage = Box::new(PostgresStorage::new(
+ rt_handle.clone(),
+ connection,
+ last_miniblock_number,
+ true,
+ ));
+ let block_state = input
+ .merkle_paths_input
+ .clone()
+ .into_witness_hash_block_state();
+ let checked_storage = Box::new(WitnessStorage::new(block_state));
+ Box::new(ShadowStorage::new(
+ source_storage,
+ checked_storage,
+ input.block_number,
+ ))
+ }
+ BasicWitnessGeneratorDataSource::FromBlob => {
+ let block_state = input
+ .merkle_paths_input
+ .clone()
+ .into_witness_hash_block_state();
+ Box::new(WitnessStorage::new(block_state))
+ }
+ };
 let mut tree = PrecalculatedMerklePathsProvider::new(
 input.merkle_paths_input,
 input.previous_block_hash.0,
 );
- let storage_view = &mut StorageView::new(storage);
- let storage_oracle: StorageOracle<HistoryDisabled> =
- StorageOracle::new(storage_view.as_ptr());
+ let storage_view = StorageView::new(storage);
+ let storage_view = storage_view.to_rc_ptr();
+ let storage_oracle: StorageOracle<StorageView<Box<dyn ReadStorage>>, HistoryDisabled> =
+ StorageOracle::new(storage_view);
 let memory: SimpleMemory<HistoryDisabled> = SimpleMemory::default();
 let mut hasher = DefaultHasher::new();
 GEOMETRY_CONFIG.hash(&mut hasher);
- vlog::info!(
+ tracing::info!(
 "generating witness for block {} using geometry config hash: {}",
 input.block_number.0,
 hasher.finish()
 diff --git a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs
 index 62280926733d..eedb2d4d8e9c 100644
 --- a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs
 +++ b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs
 @@ -76,7 +76,7 @@ impl LeafAggregationWitnessGenerator {
 ) -> LeafAggregationArtifacts {
 let LeafAggregationWitnessGeneratorJob { block_number, job } = leaf_job;
- vlog::info!(
+ tracing::info!(
 "Starting witness generation of type {:?} for block {}",
 AggregationRound::LeafAggregation,
 block_number.0
 @@ -179,7 +179,7 @@ pub fn process_leaf_aggregation_job(
 VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>,
 > = get_vks_for_basic_circuits();
- vlog::info!(
+ tracing::info!(
 "Verification keys loaded in {:?}",
 stage_started_at.elapsed()
 );
 @@ -194,7 +194,7 @@ pub fn process_leaf_aggregation_job(
 verification_keys,
 ));
- vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed());
+ tracing::info!("Commitments generated in {:?}", stage_started_at.elapsed());
 let stage_started_at = Instant::now();
 @@ -210,11 +210,11 @@ pub fn process_leaf_aggregation_job(
 g2_points,
 );
- vlog::info!(
+ tracing::info!(
 "prepare_leaf_aggregations took 
{:?}", stage_started_at.elapsed() ); - vlog::info!( + tracing::info!( "Leaf witness generation for block {} is complete in {:?}. Number of circuits: {}", block_number.0, started_at.elapsed(), diff --git a/core/bin/zksync_core/src/witness_generator/mod.rs b/core/bin/zksync_core/src/witness_generator/mod.rs index c39a9d8eb605..4700318d5b39 100644 --- a/core/bin/zksync_core/src/witness_generator/mod.rs +++ b/core/bin/zksync_core/src/witness_generator/mod.rs @@ -26,7 +26,7 @@ mod tests; /// each of them starts with an invocation of `WitnessGenerator` with a corresponding `WitnessGeneratorJobType`: /// * `WitnessGeneratorJobType::BasicCircuits`: /// generates basic circuits (circuits like `Main VM` - up to 50 * 48 = 2400 circuits): -/// input table: `basic_circuit_witness_jobs` +/// input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`) /// artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and `scheduler_aggregation_jobs`) /// value in `aggregation_round` field of `prover_jobs` table: 0 /// * `WitnessGeneratorJobType::LeafAggregation`: @@ -52,7 +52,7 @@ mod tests; /// setting its status to `queued` /// * `WitnessGenerator` picks up such job and proceeds to the next round /// -/// Note that the very first input table (`basic_circuit_witness_jobs`) +/// Note that the very first input table (`basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`)) /// is populated by the tree (as the input artifact for the `WitnessGeneratorJobType::BasicCircuits` is the merkle proofs) /// diff --git a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs index ac9009a1ea8e..7bcaaec077f6 100644 --- a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs @@ -84,7 +84,7 @@ impl NodeAggregationWitnessGenerator { let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); let NodeAggregationWitnessGeneratorJob { block_number, job } = node_job; - vlog::info!( + tracing::info!( "Starting witness generation of type {:?} for block {}", AggregationRound::NodeAggregation, block_number.0 @@ -179,7 +179,7 @@ pub fn process_node_aggregation_job( &config.key_download_url, ); env::set_var("CRS_FILE", config.initial_setup_key_path); - vlog::info!("Keys loaded in {:?}", stage_started_at.elapsed()); + tracing::info!("Keys loaded in {:?}", stage_started_at.elapsed()); let stage_started_at = Instant::now(); let verification_keys: HashMap< @@ -208,13 +208,13 @@ pub fn process_node_aggregation_job( node_aggregation_vk, )); - vlog::info!( + tracing::info!( "commitments: basic set: {:?}, leaf: {:?}, node: {:?}", to_hex(&set_committment), to_hex(&leaf_aggregation_vk_committment), to_hex(&node_aggregation_vk_committment) ); - vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); + tracing::info!("Commitments generated in {:?}", stage_started_at.elapsed()); // fs::write("previous_level_proofs.bincode", bincode::serialize(&job.previous_level_proofs).unwrap()).unwrap(); // fs::write("leaf_aggregation_vk.bincode", bincode::serialize(&leaf_aggregation_vk).unwrap()).unwrap(); @@ -245,7 +245,7 @@ pub fn process_node_aggregation_job( g2_points, ); - vlog::info!( + tracing::info!( "prepare_node_aggregations took {:?}", stage_started_at.elapsed() ); @@ -261,7 +261,7 @@ pub fn process_node_aggregation_job( "prepare_node_aggregations returned more than 
one node aggregation" ); - vlog::info!( + tracing::info!( "Node witness generation for block {} is complete in {:?}. Number of circuits: {}", block_number.0, started_at.elapsed(), diff --git a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs index 48c063857b14..96705de7e918 100644 --- a/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs +++ b/core/bin/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs @@ -22,7 +22,7 @@ pub struct PrecalculatedMerklePathsProvider { impl PrecalculatedMerklePathsProvider { pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self { let next_enumeration_index = input.next_enumeration_index(); - vlog::debug!("Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index); + tracing::debug!("Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index); Self { root_hash, pending_leaves: input.into_merkle_paths().collect(), @@ -54,7 +54,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> } fn get_leaf(&mut self, index: &[u8; 32]) -> LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf> { - vlog::trace!( + tracing::trace!( "Invoked get_leaf({:?}). pending leaves size: {:?}. current root: {:?}", index, self.pending_leaves.len(), @@ -107,7 +107,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> index: &[u8; 32], leaf: ZkSyncStorageLeaf, ) -> LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf> { - vlog::trace!( + tracing::trace!( "Invoked insert_leaf({:?}). pending leaves size: {:?}. current root: {:?}", index, self.pending_leaves.len(), @@ -172,7 +172,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> Vec<([u8; 32], ZkSyncStorageLeaf)>, Vec, ) { - vlog::trace!( + tracing::trace!( "invoked filter_renumerate(), pending leaves size: {:?}", self.pending_leaves.len() ); @@ -221,7 +221,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> query: &LeafQuery<256, 32, 32, 32, ZkSyncStorageLeaf>, ) -> bool { //copied from zkevm_test_harness/src/witness/tree/mod.rs with minor changes - vlog::trace!( + tracing::trace!( "invoked verify_inclusion. 
Index: {:?}, root: {:?})",
 query.index,
 root
 diff --git a/core/bin/zksync_core/src/witness_generator/scheduler.rs b/core/bin/zksync_core/src/witness_generator/scheduler.rs
 index f4dce3d2c353..834111fe59e0 100644
 --- a/core/bin/zksync_core/src/witness_generator/scheduler.rs
 +++ b/core/bin/zksync_core/src/witness_generator/scheduler.rs
 @@ -71,7 +71,7 @@ impl SchedulerWitnessGenerator {
 ) -> SchedulerArtifacts {
 let SchedulerWitnessGeneratorJob { block_number, job } = scheduler_job;
- vlog::info!(
+ tracing::info!(
 "Starting witness generation of type {:?} for block {}",
 AggregationRound::Scheduler,
 block_number.0
 @@ -196,7 +196,7 @@ pub fn process_scheduler_job(
 verification_keys,
 ));
- vlog::info!(
+ tracing::info!(
 "Verification keys loaded in {:?}",
 stage_started_at.elapsed()
 );
 @@ -215,7 +215,7 @@ pub fn process_scheduler_job(
 node_aggregation_vk.clone(),
 ));
- vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed());
+ tracing::info!("Commitments generated in {:?}", stage_started_at.elapsed());
 let stage_started_at = Instant::now();
 let (scheduler_circuit, final_aggregation_result) =
 @@ -233,12 +233,12 @@ pub fn process_scheduler_job(
 g2_points,
 );
- vlog::info!(
+ tracing::info!(
 "prepare_scheduler_circuit took {:?}",
 stage_started_at.elapsed()
 );
- vlog::info!(
+ tracing::info!(
 "Scheduler generation for block {} is complete in {:?}",
 block_number.0,
 started_at.elapsed()
 diff --git a/core/bin/zksync_core/src/witness_generator/utils.rs b/core/bin/zksync_core/src/witness_generator/utils.rs
 index da2a2141d06e..c457e3c4483f 100644
 --- a/core/bin/zksync_core/src/witness_generator/utils.rs
 +++ b/core/bin/zksync_core/src/witness_generator/utils.rs
 @@ -1,10 +1,9 @@
 -use vm::zk_evm::ethereum_types::U256;
 use zksync_object_store::{CircuitKey, ObjectStore};
 use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit;
 use zksync_types::zkevm_test_harness::bellman::bn256::Bn256;
 use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle;
 -use zksync_types::USED_BOOTLOADER_MEMORY_BYTES;
 use zksync_types::{proofs::AggregationRound, L1BatchNumber};
 +use zksync_types::{U256, USED_BOOTLOADER_MEMORY_BYTES};
 pub fn expand_bootloader_contents(packed: &[(usize, U256)]) -> Vec<u8> {
 let mut result: Vec<u8> = Vec::new();
 diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml
 index 29ef4032142d..e96dd0c0ce2b 100644
 --- a/core/lib/basic_types/Cargo.toml
 +++ b/core/lib/basic_types/Cargo.toml
 @@ -1,6 +1,6 @@
 [package]
 name = "zksync_basic_types"
 -version = "1.0.0"
 +version = "0.1.0"
 edition = "2018"
 authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
 homepage = "https://zksync.io/"
 diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml
 index 3b4ae3d3fef8..b116eafd4725 100644
 --- a/core/lib/circuit_breaker/Cargo.toml
 +++ b/core/lib/circuit_breaker/Cargo.toml
 @@ -1,6 +1,6 @@
 [package]
 name = "zksync_circuit_breaker"
 -version = "1.0.0"
 +version = "0.1.0"
 edition = "2018"
 authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
 homepage = "https://zksync.io/"
 @@ -10,15 +10,16 @@ keywords = ["blockchain", "zksync"]
 categories = ["cryptography"]
 [dependencies]
 -zksync_types = { path = "../types", version = "1.0" }
 -zksync_config = { path = "../config", version = "1.0" }
 -zksync_contracts = { path = "../contracts", version = "1.0" }
 -zksync_dal = { path = "../dal", version = "1.0" }
 -zksync_eth_client = { path = "../eth_client", version = "1.0" }
 +zksync_types = { path = "../types" }
 +zksync_config = { path = "../config" }
 +zksync_contracts = { 
path = "../contracts" }
 +zksync_dal = { path = "../dal" }
 +zksync_eth_client = { path = "../eth_client" }
 thiserror = "1.0"
 serde_json = "1.0"
 futures = { version = "0.3", features = ["compat"] }
 tokio = { version = "1", features = ["time"] }
 +anyhow = "1.0"
 async-trait = "0.1"
 hex = "0.4"
 convert_case = "0.6.0"
 diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs
 index ce7fa30686d2..c2a03e04e686 100644
 --- a/core/lib/circuit_breaker/src/lib.rs
 +++ b/core/lib/circuit_breaker/src/lib.rs
 @@ -1,5 +1,6 @@
 use std::time::Duration;
 +use anyhow::Context as _;
 use futures::channel::oneshot;
 use thiserror::Error;
 use tokio::sync::watch;
 @@ -57,18 +58,19 @@ impl CircuitBreakerChecker {
 self,
 circuit_breaker_sender: oneshot::Sender<CircuitBreakerError>,
 stop_receiver: watch::Receiver<bool>,
 - ) {
 + ) -> anyhow::Result<()> {
 loop {
 if *stop_receiver.borrow() {
 break;
 }
 if let Err(error) = self.check().await {
 - circuit_breaker_sender
 + return circuit_breaker_sender
 .send(error)
 - .expect("failed to send circuit breaker messsage");
 - return;
 + .ok()
 + .context("failed to send circuit breaker message");
 }
 tokio::time::sleep(self.sync_interval).await;
 }
 + Ok(())
 }
 }
 diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml
 index d6e88c4e4c1e..b1059e719dfe 100644
 --- a/core/lib/config/Cargo.toml
 +++ b/core/lib/config/Cargo.toml
 @@ -1,6 +1,6 @@
 [package]
 name = "zksync_config"
 -version = "1.0.0"
 +version = "0.1.0"
 edition = "2018"
 authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
 homepage = "https://zksync.io/"
 @@ -10,9 +10,9 @@ keywords = ["blockchain", "zksync"]
 categories = ["cryptography"]
 [dependencies]
 -zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" }
 -zksync_utils = { path = "../../lib/utils", version = "1.0" }
 -zksync_contracts = { path = "../../lib/contracts", version = "1.0" }
 +zksync_basic_types = { path = "../../lib/basic_types" }
 +zksync_utils = { path = "../../lib/utils" }
 +zksync_contracts = { path = "../../lib/contracts" }
 url = "2.1"
 num = "0.3.1"
 @@ -21,3 +21,4 @@ serde_json = "1.0"
 envy = "0.4"
 once_cell = "1.13.0"
 bigdecimal = "0.2.2"
 +hex = "0.4"
 diff --git a/core/lib/config/src/configs/alerts.rs b/core/lib/config/src/configs/alerts.rs
 index 59b6d0ebddf6..0fa4b6322a8a 100644
 --- a/core/lib/config/src/configs/alerts.rs
 +++ b/core/lib/config/src/configs/alerts.rs
 @@ -29,7 +29,7 @@ mod tests {
 sporadic_crypto_errors_substrs: vec![
 "EventDestroyErr".to_string(),
 "Can't free memory of DeviceBuf".to_string(),
 - "called `Result::unwrap()` on an `Err` value: PoisonError".to_string(),
 + "value: PoisonError".to_string(),
 ],
 }
 }
 @@ -38,7 +38,7 @@ mod tests {
 fn test_from_env() {
 let mut lock = MUTEX.lock();
 let config = r#"
 - ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS=EventDestroyErr,Can't free memory of DeviceBuf,called `Result::unwrap()` on an `Err` value: PoisonError
 + ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS="EventDestroyErr,Can't free memory of DeviceBuf,value: PoisonError"
 "#;
 lock.set_env(config);
 diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
 index 271793ee52ad..37a39dde7385 100644
 --- a/core/lib/config/src/configs/api.rs
 +++ b/core/lib/config/src/configs/api.rs
 @@ -95,6 +95,10 @@ pub struct Web3JsonRpcConfig {
 pub max_batch_request_size: Option<usize>,
 /// Maximum response body size in MiBs. Default is 10 MiB.
 pub max_response_body_size_mb: Option<usize>,
 + /// Maximum number of requests per minute for the WebSocket server.
 + /// The value is per active connection. 
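+ /// With the default of 6000 (see `websocket_requests_per_minute_limit()` below), this
+ /// works out to roughly 100 requests per second per connection.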
+ /// Note: For HTTP, rate limiting is expected to be configured on the infra level. + pub websocket_requests_per_minute_limit: Option, } impl Web3JsonRpcConfig { @@ -176,6 +180,11 @@ impl Web3JsonRpcConfig { pub fn max_response_body_size(&self) -> usize { self.max_response_body_size_mb.unwrap_or(10) * super::BYTES_IN_MEGABYTE } + + pub fn websocket_requests_per_minute_limit(&self) -> u32 { + // The default limit is chosen to be reasonably permissive. + self.websocket_requests_per_minute_limit.unwrap_or(6000) + } } #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -256,6 +265,7 @@ mod tests { fee_history_limit: Some(100), max_batch_request_size: Some(200), max_response_body_size_mb: Some(10), + websocket_requests_per_minute_limit: Some(10), }, contract_verification: ContractVerificationApiConfig { port: 3070, @@ -288,7 +298,7 @@ mod tests { API_WEB3_JSON_RPC_GAS_PRICE_SCALE_FACTOR=1.2 API_WEB3_JSON_RPC_TRANSACTIONS_PER_SEC_LIMIT=1000 API_WEB3_JSON_RPC_REQUEST_TIMEOUT=10 - API_WEB3_JSON_RPC_ACCOUNT_PKS=0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002 + API_WEB3_JSON_RPC_ACCOUNT_PKS="0x0000000000000000000000000000000000000000000000000000000000000001,0x0000000000000000000000000000000000000000000000000000000000000002" API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 @@ -300,6 +310,7 @@ mod tests { API_WEB3_JSON_RPC_WS_THREADS=256 API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100 API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200 + API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10 API_CONTRACT_VERIFICATION_PORT="3070" API_CONTRACT_VERIFICATION_URL="http://127.0.0.1:3070" API_CONTRACT_VERIFICATION_THREADS_PER_SERVER=128 diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index ceeb2cf75d1a..baeff958e8e8 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -26,6 +26,7 @@ pub struct ChainConfig { impl ChainConfig { pub fn from_env() -> Self { Self { + // TODO rename `eth` to `network` network: NetworkConfig::from_env(), state_keeper: StateKeeperConfig::from_env(), operations_manager: OperationsManagerConfig::from_env(), @@ -99,6 +100,13 @@ pub struct StateKeeperConfig { /// Max number of computational gas that validation step is allowed to take. pub validation_computational_gas_limit: u32, pub save_call_traces: bool, + + pub virtual_blocks_interval: u32, + pub virtual_blocks_per_miniblock: u32, + + /// Flag which will enable storage to cache witness_inputs during State Keeper's run. + /// NOTE: This will slow down StateKeeper, to be used in non-production environments! 
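+ /// Set via the `CHAIN_STATE_KEEPER_UPLOAD_WITNESS_INPUTS_TO_GCS` env variable (see the
+ /// test config below).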
+ pub upload_witness_inputs_to_gcs: bool, } impl StateKeeperConfig { @@ -212,6 +220,9 @@ mod tests { default_aa_hash: H256::from(&[254; 32]), validation_computational_gas_limit: 10_000_000, save_call_traces: false, + virtual_blocks_interval: 1, + virtual_blocks_per_miniblock: 1, + upload_witness_inputs_to_gcs: false, }, operations_manager: OperationsManagerConfig { delay_interval: 100, @@ -257,6 +268,7 @@ mod tests { CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" + CHAIN_STATE_KEEPER_UPLOAD_WITNESS_INPUTS_TO_GCS="false" CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index f6530e18ee3f..35c1bbecf9c5 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -31,6 +31,9 @@ pub struct ContractsConfig { pub recursion_leaf_level_vk_hash: H256, pub recursion_circuits_set_vks_hash: H256, pub l1_multicall3_addr: Address, + pub fri_recursion_scheduler_level_vk_hash: H256, + pub fri_recursion_node_level_vk_hash: H256, + pub fri_recursion_leaf_level_vk_hash: H256, } impl ContractsConfig { @@ -81,6 +84,15 @@ mod tests { "0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0", ), l1_multicall3_addr: addr("0xcA11bde05977b3631167028862bE2a173976CA11"), + fri_recursion_scheduler_level_vk_hash: hash( + "0x201d4c7d8e781d51a3bbd451a43a8f45240bb765b565ae6ce69192d918c3563d", + ), + fri_recursion_node_level_vk_hash: hash( + "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + ), + fri_recursion_leaf_level_vk_hash: hash( + "0x72167c43a46cf38875b267d67716edc4563861364a3c03ab7aee73498421e828", + ), } } @@ -111,6 +123,10 @@ CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH="0x1186ec268d49f1905f8d9c1e9d39fc33e98c74 CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH="0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH="0x142a364ef2073132eaf07aa7f3d8495065be5b92a2dc14fda09b4216affed9c0" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" +CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH="0x201d4c7d8e781d51a3bbd451a43a8f45240bb765b565ae6ce69192d918c3563d" +CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH="0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080" +CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH="0x72167c43a46cf38875b267d67716edc4563861364a3c03ab7aee73498421e828" + "#; lock.set_env(config); diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index d4651d60a04c..ca1f49421423 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -32,6 +32,12 @@ pub enum ProofSendingMode { SkipEveryProof, } +#[derive(Debug, Deserialize, Clone, Copy, PartialEq)] +pub enum ProofLoadingMode { + OldProofFromDb, + FriProofFromGcs, +} + #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct SenderConfig { pub aggregated_proof_sizes: Vec, @@ -62,6 +68,9 @@ pub struct SenderConfig { pub l1_batch_min_age_before_execute_seconds: Option, // Max acceptable fee for sending tx it acts as a safeguard to prevent sending tx with very high fees. pub max_acceptable_priority_fee_in_gwei: u64, + + /// The mode in which proofs are loaded, either from DB/GCS for FRI/Old proof. 
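+ /// `ProofLoadingMode::OldProofFromDb` reads proofs from the database, while
+ /// `ProofLoadingMode::FriProofFromGcs` reads FRI proofs from GCS.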
+ pub proof_loading_mode: ProofLoadingMode, } impl SenderConfig { @@ -148,6 +157,7 @@ mod tests { proof_sending_mode: ProofSendingMode::SkipEveryProof, l1_batch_min_age_before_execute_seconds: Some(1000), max_acceptable_priority_fee_in_gwei: 100_000_000_000, + proof_loading_mode: ProofLoadingMode::OldProofFromDb, }, gas_adjuster: GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -191,6 +201,7 @@ mod tests { ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" + ETH_SENDER_SENDER_PROOF_LOADING_MODE="OldProofFromDb" "#; lock.set_env(config); diff --git a/core/lib/config/src/configs/fri_proof_compressor.rs b/core/lib/config/src/configs/fri_proof_compressor.rs new file mode 100644 index 000000000000..a464a8bf4535 --- /dev/null +++ b/core/lib/config/src/configs/fri_proof_compressor.rs @@ -0,0 +1,68 @@ +use serde::Deserialize; +use std::time::Duration; + +use super::envy_load; + +/// Configuration for the fri proof compressor +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct FriProofCompressorConfig { + /// The compression mode to use + pub compression_mode: u8, + + /// Configurations for prometheus + pub prometheus_listener_port: u16, + pub prometheus_pushgateway_url: String, + pub prometheus_push_interval_ms: Option, + + /// Max time for proof compression to be performed + pub generation_timeout_in_secs: u16, + /// Max attempts for proof compression to be performed + pub max_attempts: u32, +} + +impl FriProofCompressorConfig { + pub fn from_env() -> Self { + envy_load("fri_proof_compressor", "FRI_PROOF_COMPRESSOR_") + } + + pub fn generation_timeout(&self) -> Duration { + Duration::from_secs(self.generation_timeout_in_secs as u64) + } +} + +#[cfg(test)] +mod tests { + use crate::configs::test_utils::EnvMutex; + + use super::*; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> FriProofCompressorConfig { + FriProofCompressorConfig { + compression_mode: 1, + prometheus_listener_port: 3316, + prometheus_pushgateway_url: "http://127.0.0.1:9091".to_string(), + prometheus_push_interval_ms: Some(100), + generation_timeout_in_secs: 3000, + max_attempts: 5, + } + } + + #[test] + fn from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + FRI_PROOF_COMPRESSOR_COMPRESSION_MODE=1 + FRI_PROOF_COMPRESSOR_PROMETHEUS_LISTENER_PORT=3316 + FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" + FRI_PROOF_COMPRESSOR_PROMETHEUS_PUSH_INTERVAL_MS=100 + FRI_PROOF_COMPRESSOR_GENERATION_TIMEOUT_IN_SECS=3000 + FRI_PROOF_COMPRESSOR_MAX_ATTEMPTS=5 + "#; + lock.set_env(config); + + let actual = FriProofCompressorConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/fri_prover_gateway.rs b/core/lib/config/src/configs/fri_prover_gateway.rs new file mode 100644 index 000000000000..d326e577b3cb --- /dev/null +++ b/core/lib/config/src/configs/fri_prover_gateway.rs @@ -0,0 +1,57 @@ +use super::envy_load; +use serde::Deserialize; +use std::time::Duration; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct FriProverGatewayConfig { + pub api_url: String, + pub api_poll_duration_secs: u16, + + /// Configurations for prometheus + pub prometheus_listener_port: u16, + pub prometheus_pushgateway_url: String, + pub prometheus_push_interval_ms: Option, +} + +impl FriProverGatewayConfig { + pub fn from_env() -> Self { + envy_load("fri_prover_gateway", 
"FRI_PROVER_GATEWAY_") + } + + pub fn api_poll_duration(&self) -> Duration { + Duration::from_secs(self.api_poll_duration_secs as u64) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::configs::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> FriProverGatewayConfig { + FriProverGatewayConfig { + api_url: "http://private-dns-for-server".to_string(), + api_poll_duration_secs: 100, + prometheus_listener_port: 3316, + prometheus_pushgateway_url: "http://127.0.0.1:9091".to_string(), + prometheus_push_interval_ms: Some(100), + } + } + + #[test] + fn from_env() { + let config = r#" + FRI_PROVER_GATEWAY_API_URL="http://private-dns-for-server" + FRI_PROVER_GATEWAY_API_POLL_DURATION_SECS="100" + FRI_PROVER_GATEWAY_PROMETHEUS_LISTENER_PORT=3316 + FRI_PROVER_GATEWAY_PROMETHEUS_PUSHGATEWAY_URL="http://127.0.0.1:9091" + FRI_PROVER_GATEWAY_PROMETHEUS_PUSH_INTERVAL_MS=100 + "#; + let mut lock = MUTEX.lock(); + lock.set_env(config); + let actual = FriProverGatewayConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 6d3223d2e247..38d01552e867 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -215,7 +215,7 @@ impl FriProverGroupConfig { assert!(not_in_range.is_empty(), "Aggregation round 1 should only contain circuit IDs 3 to 15. Ids out of range: {:?}", not_in_range); } 2 => { - let expected_range = vec![2]; + let expected_range = [2]; let missing_ids: Vec<_> = expected_range .iter() .filter(|id| !circuit_ids.contains(id)) @@ -240,7 +240,7 @@ impl FriProverGroupConfig { assert!(not_in_range.is_empty(), "Aggregation round 2 should only contain circuit ID 2. 
Ids out of range: {:?}", not_in_range); } 3 => { - let expected_range = vec![1]; + let expected_range = [1]; let missing_ids: Vec<_> = expected_range .iter() .filter(|id| !circuit_ids.contains(id)) diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index f941c2a9f251..5e88702be315 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -17,6 +17,8 @@ pub struct HouseKeeperConfig { pub fri_witness_generator_job_retrying_interval_ms: u64, pub prover_db_pool_size: u32, pub fri_prover_stats_reporting_interval_ms: u64, + pub fri_proof_compressor_job_retrying_interval_ms: u64, + pub fri_proof_compressor_stats_reporting_interval_ms: u64, } impl HouseKeeperConfig { @@ -46,6 +48,8 @@ mod tests { fri_witness_generator_job_retrying_interval_ms: 30_000, prover_db_pool_size: 2, fri_prover_stats_reporting_interval_ms: 30_000, + fri_proof_compressor_job_retrying_interval_ms: 30_000, + fri_proof_compressor_stats_reporting_interval_ms: 30_000, } } @@ -65,6 +69,8 @@ mod tests { HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS="30000" + HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS="30000" + HOUSE_KEEPER_FRI_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS="30000" "#; lock.set_env(config); diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 292ed1161a81..edc64d112e4f 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -4,7 +4,8 @@ pub use self::{ circuit_synthesizer::CircuitSynthesizerConfig, contract_verifier::ContractVerifierConfig, contracts::ContractsConfig, database::DBConfig, eth_client::ETHClientConfig, eth_sender::ETHSenderConfig, eth_sender::GasAdjusterConfig, eth_watch::ETHWatchConfig, - fetcher::FetcherConfig, fri_prover::FriProverConfig, + fetcher::FetcherConfig, fri_proof_compressor::FriProofCompressorConfig, + fri_prover::FriProverConfig, fri_prover_gateway::FriProverGatewayConfig, fri_witness_generator::FriWitnessGeneratorConfig, fri_witness_vector_generator::FriWitnessVectorGeneratorConfig, object_store::ObjectStoreConfig, proof_data_handler::ProofDataHandlerConfig, prover::ProverConfig, prover::ProverConfigs, @@ -25,7 +26,9 @@ pub mod eth_client; pub mod eth_sender; pub mod eth_watch; pub mod fetcher; +pub mod fri_proof_compressor; pub mod fri_prover; +pub mod fri_prover_gateway; pub mod fri_prover_group; pub mod fri_witness_generator; pub mod fri_witness_vector_generator; diff --git a/core/lib/config/src/configs/object_store.rs b/core/lib/config/src/configs/object_store.rs index 12793594813b..ecfaa250185b 100644 --- a/core/lib/config/src/configs/object_store.rs +++ b/core/lib/config/src/configs/object_store.rs @@ -27,6 +27,10 @@ impl ObjectStoreConfig { pub fn public_from_env() -> Self { envy_load("public_object_store", "PUBLIC_OBJECT_STORE_") } + + pub fn prover_from_env() -> Self { + envy_load("prover_object_store", "PROVER_OBJECT_STORE_") + } } #[cfg(test)] @@ -75,4 +79,19 @@ mod tests { let actual = ObjectStoreConfig::public_from_env(); assert_eq!(actual, expected_config("/public_base_url")); } + + #[test] + fn prover_bucket_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + PROVER_OBJECT_STORE_BUCKET_BASE_URL="/prover_base_url" + PROVER_OBJECT_STORE_MODE="FileBacked" + PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" + 
PROVER_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" + PROVER_OBJECT_STORE_MAX_RETRIES="5" + "#; + lock.set_env(config); + let actual = ObjectStoreConfig::prover_from_env(); + assert_eq!(actual, expected_config("/prover_base_url")); + } } diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index d15fed3c8821..cf420540d9e4 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -2,10 +2,18 @@ use super::envy_load; use serde::Deserialize; use std::time::Duration; +#[derive(Debug, Deserialize, Clone, Copy, PartialEq)] +pub enum ProtocolVersionLoadingMode { + FromDb, + FromEnvVar, +} + #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, + pub protocol_version_loading_mode: ProtocolVersionLoadingMode, + pub fri_protocol_version_id: u16, } impl ProofDataHandlerConfig { @@ -29,6 +37,8 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, + protocol_version_loading_mode: ProtocolVersionLoadingMode::FromEnvVar, + fri_protocol_version_id: 2, } } @@ -37,6 +47,8 @@ let config = r#" PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" + PROOF_DATA_HANDLER_PROTOCOL_VERSION_LOADING_MODE="FromEnvVar" + PROOF_DATA_HANDLER_FRI_PROTOCOL_VERSION_ID="2" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/config/src/configs/utils.rs b/core/lib/config/src/configs/utils.rs index 2d091dc59b0c..b8bee2e96d42 100644 --- a/core/lib/config/src/configs/utils.rs +++ b/core/lib/config/src/configs/utils.rs @@ -1,12 +1,14 @@ -use crate::configs::envy_load; use serde::Deserialize; -use std::time::Duration; + +use std::{env, time::Duration}; + +use crate::configs::envy_load; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct PrometheusConfig { /// Port to which the Prometheus exporter server is listening. pub listener_port: u16, - /// Url of Pushgateway. + /// URL of the push gateway. pub pushgateway_url: String, /// Push interval in ms. pub push_interval_ms: Option<u64>, @@ -16,7 +18,18 @@ impl PrometheusConfig { pub fn from_env() -> Self { envy_load("prometheus", "API_PROMETHEUS_") } + pub fn push_interval(&self) -> Duration { Duration::from_millis(self.push_interval_ms.unwrap_or(100)) } +
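A hedged usage sketch for the config above (env values illustrative, not from this diff; `envy_load` strips the given prefix before matching field names, as the tests in these config modules rely on):

// Sketch only: loading PrometheusConfig from "API_PROMETHEUS_"-prefixed env vars.
std::env::set_var("API_PROMETHEUS_LISTENER_PORT", "3312");
std::env::set_var("API_PROMETHEUS_PUSHGATEWAY_URL", "http://127.0.0.1:9091");
let config = PrometheusConfig::from_env();
// push_interval_ms was left unset, so push_interval() falls back to the 100 ms default.
assert_eq!(config.push_interval(), Duration::from_millis(100));

+ /// Returns the full endpoint URL for the push gateway.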
+ pub fn gateway_endpoint(&self) -> String { + let gateway_url = &self.pushgateway_url; + let job_id = "zksync-pushgateway"; + let namespace = + env::var("POD_NAMESPACE").unwrap_or_else(|_| "UNKNOWN_NAMESPACE".to_owned()); + let pod = env::var("POD_NAME").unwrap_or_else(|_| "UNKNOWN_POD".to_owned()); + format!("{gateway_url}/metrics/job/{job_id}/namespace/{namespace}/pod/{pod}") + } } diff --git a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs index 8af1a04b1ccb..45ddbfbce3e7 100644 --- a/core/lib/config/src/configs/witness_generator.rs +++ b/core/lib/config/src/configs/witness_generator.rs @@ -7,6 +7,13 @@ use serde::Deserialize; // Local uses use super::envy_load; +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub enum BasicWitnessGeneratorDataSource { + FromPostgres, + FromPostgresShadowBlob, + FromBlob, +} + /// Configuration for the witness generation #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct WitnessGeneratorConfig { @@ -26,6 +33,8 @@ pub struct WitnessGeneratorConfig { // This parameter is used in case of performing circuit upgrades(VK/Setup keys), // to not let witness-generator pick new job and finish all the existing jobs with old circuit. pub last_l1_batch_to_process: Option<u32>, + /// Where the basic witness generator will load its data from. + pub data_source: BasicWitnessGeneratorDataSource, } impl WitnessGeneratorConfig { @@ -58,6 +67,7 @@ mod tests { blocks_proving_percentage: Some(30), dump_arguments_for_blocks: vec![2, 3], last_l1_batch_to_process: None, + data_source: BasicWitnessGeneratorDataSource::FromBlob, } } @@ -71,6 +81,7 @@ WITNESS_MAX_ATTEMPTS=4 WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" WITNESS_BLOCKS_PROVING_PERCENTAGE="30" + WITNESS_DATA_SOURCE="FromBlob" "#; lock.set_env(config); diff --git a/core/lib/config/src/constants/contracts.rs b/core/lib/config/src/constants/contracts.rs index 15b9c9c08207..33b735c8a2d7 100644 --- a/core/lib/config/src/constants/contracts.rs +++ b/core/lib/config/src/constants/contracts.rs @@ -100,6 +100,7 @@ pub const ERC20_TRANSFER_TOPIC: H256 = H256([ 99, 196, 161, 22, 40, 245, 90, 77, 245, 35, 179, 239, ]); +// TODO (SMA-240): Research whether using zero address is ok pub const MINT_AND_BURN_ADDRESS: H160 = H160::zero(); // The storage_log.value database value for a contract that was deployed in a failed transaction. diff --git a/core/lib/config/src/constants/crypto.rs b/core/lib/config/src/constants/crypto.rs index dcc9d34a9ebd..e9ed44ff3081 100644 --- a/core/lib/config/src/constants/crypto.rs +++ b/core/lib/config/src/constants/crypto.rs @@ -6,10 +6,13 @@ pub const ZKPORTER_IS_AVAILABLE: bool = false; /// Depth of the account tree. pub const ROOT_TREE_DEPTH: usize = 256; /// Cost of 1 byte of calldata in gas. +// TODO (SMA-1609): Double check this value. +// TODO: possibly remove this value. pub const GAS_PER_PUBDATA_BYTE: u32 = 16; /// Maximum amount of bytes in one packed write storage slot. /// Calculated as `(len(hash) + 1) + len(u256)` +// TODO (SMA-1609): Double check this value. pub const MAX_BYTES_PER_PACKED_SLOT: u64 = 65; /// Amount of gas required to publish one slot in pubdata. diff --git a/core/lib/config/src/constants/ethereum.rs b/core/lib/config/src/constants/ethereum.rs index 61889d8afba5..15187892ae86 100644 --- a/core/lib/config/src/constants/ethereum.rs +++ b/core/lib/config/src/constants/ethereum.rs @@ -13,6 +13,7 @@ pub const GUARANTEED_PUBDATA_PER_L1_BATCH: u64 = 4000;
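To make these pubdata constants concrete, a rough worked example (my arithmetic, combining GAS_PER_PUBDATA_BYTE above with the MAX_PUBDATA_PER_L1_BATCH value defined just below):

// Hedged sketch: approximate L1 calldata gas to publish one full batch of pubdata.
let gas_per_pubdata_byte: u64 = 16; // GAS_PER_PUBDATA_BYTE
let max_pubdata_per_batch: u64 = 120_000; // MAX_PUBDATA_PER_L1_BATCH
assert_eq!(gas_per_pubdata_byte * max_pubdata_per_batch, 1_920_000);

/// The maximum number of pubdata per L1 batch.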
pub const MAX_PUBDATA_PER_L1_BATCH: u64 = 120000; +// TODO: import from zkevm_opcode_defs once VM1.3 is supported pub const MAX_L2_TX_GAS_LIMIT: u64 = 80000000; // The users should always be able to provide `MAX_GAS_PER_PUBDATA_BYTE` gas per pubdata in their diff --git a/core/lib/config/src/constants/fees/intrinsic.rs b/core/lib/config/src/constants/fees/intrinsic.rs index b36c8d57086e..604ba95fc570 100644 --- a/core/lib/config/src/constants/fees/intrinsic.rs +++ b/core/lib/config/src/constants/fees/intrinsic.rs @@ -2,6 +2,9 @@ //! The file with constants related to fees most of which need to be computed use super::IntrinsicSystemGasConstants; +// TODO (SMA-1699): Use this method to ensure that the transactions provide enough +// intrinsic gas on the API level. + pub const fn get_intrinsic_constants() -> IntrinsicSystemGasConstants { IntrinsicSystemGasConstants { l2_tx_intrinsic_gas: 14070, diff --git a/core/lib/config/src/constants/system_context.rs b/core/lib/config/src/constants/system_context.rs index 4802cf3cc8e5..c142d6e73ec0 100644 --- a/core/lib/config/src/constants/system_context.rs +++ b/core/lib/config/src/constants/system_context.rs @@ -2,7 +2,7 @@ // all user-set slots will have the two highest bits set to 0. // That is why all internal slots will have the form `b11...` but we use `b1111 = xff` for simplicity. -use zksync_basic_types::H256; +use zksync_basic_types::{H256, U256}; pub const SYSTEM_CONTEXT_CHAIN_ID_POSITION: H256 = H256::zero(); @@ -55,3 +55,41 @@ pub const SYSTEM_CONTEXT_BLOCK_INFO_POSITION: H256 = H256([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, ]); + +pub const SYSTEM_CONTEXT_BLOCK_HASH_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, +]); + +pub const SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, +]); + +pub const SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, +]); + +pub const SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, +]); + +pub const SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES: u32 = 257; + +// It is equal to SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION + SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES +pub const CURRENT_VIRTUAL_BLOCK_INFO_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0c, +]); + +// It is equal to CURRENT_VIRTUAL_BLOCK_INFO_POSITION + 1 +pub const VIRTUIAL_BLOCK_UPGRADE_INFO_POSITION: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0d, +]); + +/// Block info is stored compactly as SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER * block_number + block_timestamp. +/// This number is equal to 2**128 +pub const SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER: U256 = U256([0, 0, 1, 0]); diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml index 380aeddf3a5b..029938f663ee 100644 --- a/core/lib/contracts/Cargo.toml +++ b/core/lib/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_contracts" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" @@ -10,10 +10,11 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zksync_utils = { path = "../utils", version = "1.0" } +zksync_utils = { path = "../utils" } ethabi = "18.0.0" serde_json = "1.0" serde = "1.0" once_cell = "1.7" hex = "0.4" +envy = "0.4" diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index be67ceb9131b..20111285e866 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -1,3 +1,7 @@ +//! Set of utility functions to read contracts both in Yul and Sol format. +//! +//! Careful: some of the methods are reading the contracts based on the ZKSYNC_HOME environment variable. + #![allow(clippy::derive_partial_eq_without_eq)] use ethabi::{ ethereum_types::{H256, U256}, @@ -7,12 +11,14 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use std::{ fs::{self, File}, - path::Path, + path::{Path, PathBuf}, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; -#[derive(Debug)] +pub mod test_contracts; + +#[derive(Debug, Clone)] pub enum ContractLanguage { Sol, Yul, @@ -132,9 +138,14 @@ pub fn known_codes_contract() -> Contract { load_sys_contract("KnownCodesStorage") } -pub fn read_bytecode(path: impl AsRef<Path>) -> Vec<u8> { +/// Reads bytecode from the path RELATIVE to the ZKSYNC_HOME environment variable. +pub fn read_bytecode(relative_path: impl AsRef<Path>) -> Vec<u8> { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); - let artifact_path = Path::new(&zksync_home).join(path); + let artifact_path = Path::new(&zksync_home).join(relative_path); + read_bytecode_from_path(artifact_path) +} +/// Reads bytecode from a given path. +pub fn read_bytecode_from_path(artifact_path: PathBuf) -> Vec<u8> { let artifact = read_file_to_json_value(artifact_path.clone()); let bytecode = artifact["bytecode"] @@ -152,18 +163,44 @@ pub fn default_erc20_bytecode() -> Vec<u8> { } pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec<u8> { - match lang { - ContractLanguage::Sol => { - read_bytecode(format!( - "etc/system-contracts/artifacts-zk/cache-zk/solpp-generated-contracts/{0}{1}.sol/{1}.json", + DEFAULT_SYSTEM_CONTRACTS_REPO.read_sys_contract_bytecode(directory, name, lang) +} + +pub static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy<SystemContractsRepo> = + Lazy::new(SystemContractsRepo::from_env); + +/// Structure representing a system contract repository - that allows +/// fetching contracts that are located there. +/// As most of the static methods in this file, is loading data based on ZKSYNC_HOME environment variable. +pub struct SystemContractsRepo { + // Path to the root of the system contracts repo. + pub root: PathBuf, +} + +impl SystemContractsRepo {
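A hedged usage sketch of the repo abstraction introduced here (the `from_env` constructor and per-repo bytecode reader are defined in the impl that follows; the contract name and empty directory are illustrative):

// Sketch only: reading a system contract's bytecode through SystemContractsRepo.
// Requires ZKSYNC_HOME to point at a checkout with built system-contract artifacts.
let repo = SystemContractsRepo::from_env();
let bytecode = repo.read_sys_contract_bytecode("", "KnownCodesStorage", ContractLanguage::Sol);
assert!(!bytecode.is_empty());

+ /// Returns the default system contracts repo with directory based on the ZKSYNC_HOME environment variable.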
+ pub fn from_env() -> Self { + let zksync_home = std::env::var("ZKSYNC_HOME").expect("ZKSYNC_HOME env variable not set."); + let zksync_home = PathBuf::from(zksync_home); + SystemContractsRepo { + root: zksync_home.join("etc/system-contracts"), + } + } + pub fn read_sys_contract_bytecode( + &self, + directory: &str, + name: &str, + lang: ContractLanguage, + ) -> Vec<u8> { + match lang { + ContractLanguage::Sol => read_bytecode_from_path(self.root.join(format!( + "artifacts-zk/cache-zk/solpp-generated-contracts/{0}{1}.sol/{1}.json", directory, name - )) - }, - ContractLanguage::Yul => { - read_zbin_bytecode(format!( - "etc/system-contracts/contracts/{0}artifacts/{1}.yul/{1}.yul.zbin", + ))), + ContractLanguage::Yul => read_zbin_bytecode_from_path(self.root.join(format!( + "contracts/{0}artifacts/{1}.yul/{1}.yul.zbin", directory, name - )) + ))), } } } @@ -175,12 +212,12 @@ pub fn read_bootloader_code(bootloader_type: &str) -> Vec<u8> { )) } -pub fn read_proved_block_bootloader_bytecode() -> Vec<u8> { - read_bootloader_code("proved_block") +pub fn read_proved_batch_bootloader_bytecode() -> Vec<u8> { + read_bootloader_code("proved_batch") } -pub fn read_playground_block_bootloader_bytecode() -> Vec<u8> { - read_bootloader_code("playground_block") +pub fn read_playground_batch_bootloader_bytecode() -> Vec<u8> { + read_bootloader_code("playground_batch") } pub fn get_loadnext_test_contract_path(file_name: &str, contract_name: &str) -> String { @@ -196,9 +233,16 @@ pub fn get_loadnext_test_contract_bytecode(file_name: &str, contract_name: &str) file_name, contract_name ) } -pub fn read_zbin_bytecode(zbin_path: impl AsRef<Path>) -> Vec<u8> { + +/// Reads zbin bytecode from a given path, relative to ZKSYNC_HOME. +pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef<Path>) -> Vec<u8> { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); - let bytecode_path = Path::new(&zksync_home).join(zbin_path); + let bytecode_path = Path::new(&zksync_home).join(relative_zbin_path); + read_zbin_bytecode_from_path(bytecode_path) +} + +/// Reads zbin bytecode from a given path. +pub fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec<u8> { fs::read(&bytecode_path) .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } @@ -229,7 +273,7 @@ impl PartialEq for BaseSystemContracts { } pub static PLAYGROUND_BLOCK_BOOTLOADER_CODE: Lazy<SystemContractCode> = Lazy::new(|| { - let bytecode = read_playground_block_bootloader_bytecode(); + let bytecode = read_playground_batch_bootloader_bytecode(); let hash = hash_bytecode(&bytecode); SystemContractCode { @@ -272,13 +316,30 @@ impl BaseSystemContracts { } // BaseSystemContracts with proved bootloader - for handling transactions. pub fn load_from_disk() -> Self { - let bootloader_bytecode = read_proved_block_bootloader_bytecode(); + let bootloader_bytecode = read_proved_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) }
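The proved-"block" loaders are renamed to proved-"batch" here; a hedged usage sketch (assuming built bootloader artifacts on disk; `hashes()` is defined further down in this file):

// Sketch only: loading the base system contracts with the proved-batch bootloader.
let contracts = BaseSystemContracts::load_from_disk();
let hashes = contracts.hashes();
println!("bootloader hash: {:?}", hashes.bootloader);

/// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'.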
pub fn playground() -> Self { - let bootloader_bytecode = read_playground_block_bootloader_bytecode(); + let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + pub fn playground_pre_virtual_blocks() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_1_3_2/playground_block.yul/playground_block.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + pub fn playground_post_virtual_blocks() -> Self { + let bootloader_bytecode = read_zbin_bytecode("etc/multivm_bootloaders/vm_virtual_blocks/playground_batch.yul/playground_batch.yul.zbin"); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + pub fn playground_post_virtual_blocks_finish_upgrade_fix() -> Self { + let bootloader_bytecode = read_zbin_bytecode("etc/multivm_bootloaders/vm_virtual_blocks_finish_upgrade_fix/playground_batch.yul/playground_batch.yul.zbin"); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } @@ -288,6 +349,27 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_pre_virtual_blocks() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + pub fn estimate_gas_post_virtual_blocks() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_virtual_blocks/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + pub fn estimate_gas_post_virtual_blocks_finish_upgrade_fix() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_virtual_blocks_finish_upgrade_fix/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash, diff --git a/core/lib/utils/src/test_utils.rs b/core/lib/contracts/src/test_contracts.rs similarity index 52% rename from core/lib/utils/src/test_utils.rs rename to core/lib/contracts/src/test_contracts.rs index 00f57491b7eb..9db4051cfdbd 100644 --- a/core/lib/utils/src/test_utils.rs +++ b/core/lib/contracts/src/test_contracts.rs @@ -1,3 +1,6 @@ +use crate::get_loadnext_contract; +use ethabi::ethereum_types::U256; +use ethabi::{Bytes, Token}; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] @@ -39,3 +42,23 @@ impl Default for LoadnextContractExecutionParams { } } } + +impl LoadnextContractExecutionParams { + pub fn to_bytes(&self) -> Bytes { + let loadnext_contract = get_loadnext_contract(); + let contract_function = loadnext_contract.contract.function("execute").unwrap(); + + let params = vec![ + Token::Uint(U256::from(self.reads)), + Token::Uint(U256::from(self.writes)), + Token::Uint(U256::from(self.hashes)), + Token::Uint(U256::from(self.events)), + Token::Uint(U256::from(self.recursive_calls)), + Token::Uint(U256::from(self.deploys)), + ]; + + contract_function + .encode_input(&params) + .expect("failed to encode parameters") + } +}
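A hedged sketch of what `to_bytes` yields (field values illustrative): ABI-encoded calldata for the loadnext contract's `execute` function, i.e. a 4-byte selector followed by one 32-byte word per parameter:

// Sketch only: six uint256 params => 4 + 6 * 32 = 196 bytes of calldata.
let params = LoadnextContractExecutionParams {
    reads: 10, writes: 10, hashes: 10, events: 10, recursive_calls: 1, deploys: 1,
};
let calldata = params.to_bytes();
assert_eq!(calldata.len(), 4 + 6 * 32);

diff --git a/core/lib/crypto/Cargo.toml b/core/lib/crypto/Cargo.toml index 838247374c48..56ee24ddfe9a 100644 --- a/core/lib/crypto/Cargo.toml +++ b/core/lib/crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_crypto" -version = "1.0.0" +version = "0.1.0" edition = "2018"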
authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -11,7 +11,7 @@ categories = ["cryptography"] readme = "README.md" [dependencies] -zksync_basic_types = { path = "../basic_types", version = "1.0" } +zksync_basic_types = { path = "../basic_types" } serde = "1.0" thiserror = "1.0" once_cell = "1.7" diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index decf249876b0..1355f37a58d3 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_dal" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -10,18 +10,17 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_utils = { path = "../utils", version = "1.0" } -zksync_config = { path = "../config", version = "1.0" } -zksync_contracts = { path = "../contracts", version = "1.0" } -zksync_types = { path = "../types", version = "1.0" } -zksync_health_check = { path = "../health_check", version = "0.1.0" } +zksync_utils = { path = "../utils" } +zksync_config = { path = "../config" } +zksync_contracts = { path = "../contracts" } +zksync_types = { path = "../types" } +zksync_health_check = { path = "../health_check" } itertools = "0.10.1" thiserror = "1.0" anyhow = "1.0" -metrics = "0.20" -tokio = { version = "1", features = ["time"] } +metrics = "0.21" +tokio = { version = "1", features = ["full"] } sqlx = { version = "0.5.13", default-features = false, features = [ "runtime-tokio-native-tls", "macros", @@ -31,7 +30,7 @@ sqlx = { version = "0.5.13", default-features = false, features = [ "json", "offline", "migrate", - "ipnetwork" + "ipnetwork", ] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -41,7 +40,8 @@ num = "0.3.1" hex = "0.4" once_cell = "1.7" strum = { version = "0.24", features = ["derive"] } +tracing = "0.1" [dev-dependencies] assert_matches = "1.5.0" -db_test_macro = { path = "../db_test_macro", version = "0.1.0" } +db_test_macro = { path = "../db_test_macro" } diff --git a/core/lib/dal/migrations/20230807152355_virtual_blocks.down.sql b/core/lib/dal/migrations/20230807152355_virtual_blocks.down.sql new file mode 100644 index 000000000000..19bc9003ae74 --- /dev/null +++ b/core/lib/dal/migrations/20230807152355_virtual_blocks.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks +DROP COLUMN virtual_blocks; diff --git a/core/lib/dal/migrations/20230807152355_virtual_blocks.up.sql b/core/lib/dal/migrations/20230807152355_virtual_blocks.up.sql new file mode 100644 index 000000000000..babe1e1c1e9c --- /dev/null +++ b/core/lib/dal/migrations/20230807152355_virtual_blocks.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks +ADD virtual_blocks BIGINT NOT NULL DEFAULT 0; diff --git a/core/lib/dal/migrations/20230822133227_prover_protocol_versions.down.sql b/core/lib/dal/migrations/20230822133227_prover_protocol_versions.down.sql new file mode 100644 index 000000000000..cb8628abbe82 --- /dev/null +++ b/core/lib/dal/migrations/20230822133227_prover_protocol_versions.down.sql @@ -0,0 +1,21 @@ +ALTER TABLE witness_inputs DROP CONSTRAINT IF EXISTS witness_inputs_prover_protocol_version_fkey; +ALTER TABLE witness_inputs ADD CONSTRAINT witness_inputs_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES protocol_versions (id); + +ALTER TABLE leaf_aggregation_witness_jobs DROP CONSTRAINT IF EXISTS leaf_aggregation_witness_jobs_prover_protocol_version_fkey; +ALTER TABLE 
leaf_aggregation_witness_jobs ADD CONSTRAINT leaf_aggregation_witness_jobs_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES protocol_versions (id); + +ALTER TABLE node_aggregation_witness_jobs DROP CONSTRAINT IF EXISTS node_aggregation_witness_jobs_prover_protocol_version_fkey; +ALTER TABLE node_aggregation_witness_jobs ADD CONSTRAINT node_aggregation_witness_jobs_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES protocol_versions (id); + +ALTER TABLE scheduler_witness_jobs DROP CONSTRAINT IF EXISTS scheduler_witness_jobs_prover_protocol_version_fkey; +ALTER TABLE scheduler_witness_jobs ADD CONSTRAINT scheduler_witness_jobs_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES protocol_versions (id); + +ALTER TABLE prover_jobs DROP CONSTRAINT IF EXISTS prover_jobs_prover_protocol_version_fkey; +ALTER TABLE prover_jobs ADD CONSTRAINT prover_jobs_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES protocol_versions (id); + +DROP TABLE IF EXISTS prover_protocol_versions; diff --git a/core/lib/dal/migrations/20230822133227_prover_protocol_versions.up.sql b/core/lib/dal/migrations/20230822133227_prover_protocol_versions.up.sql new file mode 100644 index 000000000000..bc60c23deca4 --- /dev/null +++ b/core/lib/dal/migrations/20230822133227_prover_protocol_versions.up.sql @@ -0,0 +1,30 @@ +CREATE TABLE prover_protocol_versions ( + id INT PRIMARY KEY, + timestamp BIGINT NOT NULL, + recursion_scheduler_level_vk_hash BYTEA NOT NULL, + recursion_node_level_vk_hash BYTEA NOT NULL, + recursion_leaf_level_vk_hash BYTEA NOT NULL, + recursion_circuits_set_vks_hash BYTEA NOT NULL, + verifier_address BYTEA NOT NULL, + created_at TIMESTAMP NOT NULL +); + +ALTER TABLE witness_inputs DROP CONSTRAINT IF EXISTS witness_inputs_protocol_version_fkey; +ALTER TABLE witness_inputs ADD CONSTRAINT witness_inputs_prover_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES prover_protocol_versions (id); + +ALTER TABLE leaf_aggregation_witness_jobs DROP CONSTRAINT IF EXISTS leaf_aggregation_witness_jobs_protocol_version_fkey; +ALTER TABLE leaf_aggregation_witness_jobs ADD CONSTRAINT leaf_aggregation_witness_jobs_prover_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES prover_protocol_versions (id); + +ALTER TABLE node_aggregation_witness_jobs DROP CONSTRAINT IF EXISTS node_aggregation_witness_jobs_protocol_version_fkey; +ALTER TABLE node_aggregation_witness_jobs ADD CONSTRAINT node_aggregation_witness_jobs_prover_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES prover_protocol_versions (id); + +ALTER TABLE scheduler_witness_jobs DROP CONSTRAINT IF EXISTS scheduler_witness_jobs_protocol_version_fkey; +ALTER TABLE scheduler_witness_jobs ADD CONSTRAINT scheduler_witness_jobs_prover_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES prover_protocol_versions (id); + +ALTER TABLE prover_jobs DROP CONSTRAINT IF EXISTS prover_jobs_protocol_version_fkey; +ALTER TABLE prover_jobs ADD CONSTRAINT prover_jobs_prover_protocol_version_fkey + FOREIGN KEY (protocol_version) REFERENCES prover_protocol_versions (id); diff --git a/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.down.sql b/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.down.sql new file mode 100644 index 000000000000..7a63c6657b9f --- /dev/null +++ b/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE protocol_versions ADD CONSTRAINT 
protocol_versions_upgrade_tx_hash_fkey + FOREIGN KEY (upgrade_tx_hash) REFERENCES transactions (hash); diff --git a/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.up.sql b/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.up.sql new file mode 100644 index 000000000000..bff0190f68db --- /dev/null +++ b/core/lib/dal/migrations/20230824105133_drop_protocol_versions_tx_fk.up.sql @@ -0,0 +1 @@ +ALTER TABLE protocol_versions DROP CONSTRAINT IF EXISTS protocol_versions_upgrade_tx_hash_fkey; diff --git a/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.down.sql b/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.down.sql new file mode 100644 index 000000000000..53ac23db5b11 --- /dev/null +++ b/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.down.sql @@ -0,0 +1,21 @@ +DROP INDEX IF EXISTS ix_prover_jobs_circuits_0_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_1_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_2_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_3_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_4_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_5_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_6_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_7_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_8_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_9_2; + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_0_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{Scheduler,"L1 messages merklizer"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_1_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Node aggregation","Decommitts sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_2_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Leaf aggregation","Code decommitter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_3_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Log demuxer",Keccak}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_4_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{SHA256,ECRecover}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_5_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"RAM permutation","Storage sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_6_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Storage application","Initial writes pubdata rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_7_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Repeated writes pubdata rehasher","Events sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS 
ix_prover_jobs_circuits_8_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"L1 messages sorter","L1 messages rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_9_1 ON public.prover_jobs USING btree (protocol_version, aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Main VM"}'::text[]))); diff --git a/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.up.sql b/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.up.sql new file mode 100644 index 000000000000..be184909137f --- /dev/null +++ b/core/lib/dal/migrations/20230825103341_fix_prover_jobs_protocol_version_index.up.sql @@ -0,0 +1,21 @@ +DROP INDEX IF EXISTS ix_prover_jobs_circuits_0_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_1_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_2_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_3_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_4_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_5_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_6_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_7_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_8_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_9_1; + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_0_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{Scheduler,"L1 messages merklizer"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_1_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Node aggregation","Decommitts sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_2_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Leaf aggregation","Code decommitter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_3_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Log demuxer",Keccak}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_4_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{SHA256,ECRecover}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_5_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"RAM permutation","Storage sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_6_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Storage application","Initial writes pubdata rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_7_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Repeated writes pubdata rehasher","Events sorter"}'::text[]))); +CREATE 
INDEX IF NOT EXISTS ix_prover_jobs_circuits_8_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"L1 messages sorter","L1 messages rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_9_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) INCLUDE (protocol_version) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Main VM"}'::text[]))); diff --git a/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.down.sql b/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.down.sql new file mode 100644 index 000000000000..0598e2b72c5c --- /dev/null +++ b/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS prover_fri_protocol_versions; diff --git a/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.up.sql b/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.up.sql new file mode 100644 index 000000000000..c4b3a95d89b9 --- /dev/null +++ b/core/lib/dal/migrations/20230831110448_add_prover_fri_protocol_versions_table.up.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS prover_fri_protocol_versions +( + id INT PRIMARY KEY, + recursion_scheduler_level_vk_hash BYTEA NOT NULL, + recursion_node_level_vk_hash BYTEA NOT NULL, + recursion_leaf_level_vk_hash BYTEA NOT NULL, + recursion_circuits_set_vks_hash BYTEA NOT NULL, + created_at TIMESTAMP NOT NULL +); + +ALTER TABLE witness_inputs_fri ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES prover_fri_protocol_versions (id); +ALTER TABLE leaf_aggregation_witness_jobs_fri ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES prover_fri_protocol_versions (id); +ALTER TABLE node_aggregation_witness_jobs_fri ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES prover_fri_protocol_versions (id); +ALTER TABLE scheduler_witness_jobs_fri ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES prover_fri_protocol_versions (id); +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS protocol_version INT REFERENCES prover_fri_protocol_versions (id); diff --git a/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.down.sql b/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.down.sql new file mode 100644 index 000000000000..beea4e546669 --- /dev/null +++ b/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS miniblocks_pending_batch; diff --git a/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.up.sql b/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.up.sql new file mode 100644 index 000000000000..d332f64a5ade --- /dev/null +++ b/core/lib/dal/migrations/20230906070159_miniblocks_pending_batch_index.up.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS miniblocks_pending_batch ON miniblocks USING btree (number) INCLUDE (timestamp) + WHERE l1_batch_number IS NULL; diff --git a/core/lib/dal/migrations/20230906090541_add_proof_compression_table.down.sql b/core/lib/dal/migrations/20230906090541_add_proof_compression_table.down.sql new file mode 100644 index 000000000000..63bade7ab94a --- /dev/null +++ b/core/lib/dal/migrations/20230906090541_add_proof_compression_table.down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS proof_compression_jobs_fri; + +DROP INDEX IF EXISTS 
idx_proof_compression_jobs_fri_status_processing_attempts; diff --git a/core/lib/dal/migrations/20230906090541_add_proof_compression_table.up.sql b/core/lib/dal/migrations/20230906090541_add_proof_compression_table.up.sql new file mode 100644 index 000000000000..f3e6be6ead07 --- /dev/null +++ b/core/lib/dal/migrations/20230906090541_add_proof_compression_table.up.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS proof_compression_jobs_fri +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY, + attempts SMALLINT NOT NULL DEFAULT 0, + status TEXT NOT NULL, + fri_proof_blob_url TEXT NOT NULL, + l1_proof_blob_url TEXT, + error TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP, + time_taken TIME +); + +CREATE INDEX IF NOT EXISTS idx_proof_compression_jobs_fri_status_processing_attempts + ON proof_compression_jobs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); diff --git a/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.down.sql b/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.down.sql new file mode 100644 index 000000000000..524d259a05ee --- /dev/null +++ b/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS idx_proof_compressor_jobs_fri_queued_order; +DROP INDEX IF EXISTS idx_witness_inputs_fri_queued_order; diff --git a/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.up.sql b/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.up.sql new file mode 100644 index 000000000000..da468c1f48e5 --- /dev/null +++ b/core/lib/dal/migrations/20230911090548_add_index_for_proof_compressor_table.up.sql @@ -0,0 +1,8 @@ +-- Add up migration script here +CREATE INDEX IF NOT EXISTS idx_proof_compressor_jobs_fri_queued_order + ON proof_compression_jobs_fri (l1_batch_number ASC) + WHERE status = 'queued'; + +CREATE INDEX IF NOT EXISTS idx_witness_inputs_fri_queued_order + ON witness_inputs_fri (l1_batch_number ASC) + WHERE status = 'queued'; diff --git a/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.down.sql b/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.down.sql new file mode 100644 index 000000000000..3bcacdc8852c --- /dev/null +++ b/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.down.sql @@ -0,0 +1,8 @@ +DROP INDEX IF EXISTS prover_jobs_fri_composite_index_1; +DROP INDEX IF EXISTS leaf_aggregation_witness_jobs_fri_composite_index_1; +DROP INDEX IF EXISTS node_aggregation_witness_jobs_fri_composite_index_1; +DROP INDEX IF EXISTS scheduler_witness_jobs_fri_composite_index_1; + +CREATE UNIQUE INDEX IF NOT EXISTS prover_jobs_fri_composite_index ON prover_jobs_fri(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number); +CREATE UNIQUE INDEX IF NOT EXISTS leaf_aggregation_witness_jobs_fri_composite_index ON leaf_aggregation_witness_jobs_fri(l1_batch_number, circuit_id); +CREATE UNIQUE INDEX IF NOT EXISTS node_aggregation_witness_jobs_fri_composite_index ON node_aggregation_witness_jobs_fri(l1_batch_number, circuit_id, depth); diff --git a/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.up.sql b/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.up.sql new file mode 100644 index 000000000000..24edd21e7b3f --- /dev/null +++ 
b/core/lib/dal/migrations/20230913092159_update_prover_fri_index_to_include_protocol_version.up.sql @@ -0,0 +1,9 @@ +DROP INDEX IF EXISTS prover_jobs_fri_composite_index; +DROP INDEX IF EXISTS leaf_aggregation_witness_jobs_fri_composite_index; +DROP INDEX IF EXISTS node_aggregation_witness_jobs_fri_composite_index; + +CREATE UNIQUE INDEX IF NOT EXISTS prover_jobs_fri_composite_index_1 ON prover_jobs_fri(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) INCLUDE(protocol_version); +CREATE UNIQUE INDEX IF NOT EXISTS leaf_aggregation_witness_jobs_fri_composite_index_1 ON leaf_aggregation_witness_jobs_fri(l1_batch_number, circuit_id) INCLUDE(protocol_version); +CREATE UNIQUE INDEX IF NOT EXISTS node_aggregation_witness_jobs_fri_composite_index_1 ON node_aggregation_witness_jobs_fri(l1_batch_number, circuit_id, depth) INCLUDE(protocol_version); +CREATE UNIQUE INDEX IF NOT EXISTS scheduler_witness_jobs_fri_composite_index_1 ON scheduler_witness_jobs_fri(l1_batch_number) INCLUDE(protocol_version); + diff --git a/core/lib/dal/migrations/20230914150834_enum-indices-not-null.down.sql b/core/lib/dal/migrations/20230914150834_enum-indices-not-null.down.sql new file mode 100644 index 000000000000..4f38d0d56db2 --- /dev/null +++ b/core/lib/dal/migrations/20230914150834_enum-indices-not-null.down.sql @@ -0,0 +1 @@ +ALTER TABLE initial_writes ALTER COLUMN index DROP NOT NULL; diff --git a/core/lib/dal/migrations/20230914150834_enum-indices-not-null.up.sql b/core/lib/dal/migrations/20230914150834_enum-indices-not-null.up.sql new file mode 100644 index 000000000000..ac0d0fd76e0e --- /dev/null +++ b/core/lib/dal/migrations/20230914150834_enum-indices-not-null.up.sql @@ -0,0 +1 @@ +ALTER TABLE initial_writes ALTER COLUMN index SET NOT NULL; diff --git a/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.down.sql b/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.down.sql new file mode 100644 index 000000000000..9461e6cf65e0 --- /dev/null +++ b/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE proof_compression_jobs_fri + ALTER COLUMN fri_proof_blob_url SET NOT NULL; diff --git a/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.up.sql b/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.up.sql new file mode 100644 index 000000000000..13b19c5d19e4 --- /dev/null +++ b/core/lib/dal/migrations/20230918093855_remove_not_null_constraint_fri_proof_blob_url_proof_compression_jobs_fri.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE proof_compression_jobs_fri + ALTER COLUMN fri_proof_blob_url DROP NOT NULL; diff --git a/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.down.sql b/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.down.sql new file mode 100644 index 000000000000..547dbb7e4b16 --- /dev/null +++ b/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.down.sql @@ -0,0 +1,12 @@ +ALTER TABLE witness_inputs_fri + DROP COLUMN IF EXISTS picked_by; +ALTER TABLE leaf_aggregation_witness_jobs_fri + DROP COLUMN IF EXISTS picked_by; +ALTER TABLE node_aggregation_witness_jobs_fri + DROP COLUMN IF EXISTS picked_by; +ALTER TABLE scheduler_witness_jobs_fri + 
DROP COLUMN IF EXISTS picked_by; +ALTER TABLE prover_jobs_fri + DROP COLUMN IF EXISTS picked_by; +ALTER TABLE proof_compression_jobs_fri + DROP COLUMN IF EXISTS picked_by; diff --git a/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.up.sql b/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.up.sql new file mode 100644 index 000000000000..6b73d566527f --- /dev/null +++ b/core/lib/dal/migrations/20230918134637_add_picked_by_column_to_prover_fri_related_tables.up.sql @@ -0,0 +1,12 @@ +ALTER TABLE witness_inputs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; +ALTER TABLE leaf_aggregation_witness_jobs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; +ALTER TABLE node_aggregation_witness_jobs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; +ALTER TABLE scheduler_witness_jobs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; +ALTER TABLE prover_jobs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; +ALTER TABLE proof_compression_jobs_fri + ADD COLUMN IF NOT EXISTS picked_by TEXT; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 2bdee59ce8f4..84799dd2180f 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -1,5 +1,123 @@ { "db": "PostgreSQL", + "0002e8b596794ae9396de8ac621b30dcf0befdff28c5bc23d713185f7a410df4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "UPDATE proof_generation_details SET status=$1, updated_at = now() WHERE l1_batch_number = $2" + }, + "00bd80fd83aff559d8d9232c2e98a12a1dd2c8f31792cd915e2cf11f28e583b7": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "depth", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 5, + "type_info": "Int2" + }, + { + "name": "aggregations_url", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 8, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "number_of_dependent_jobs", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "protocol_version", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "picked_by", + "ordinal": 14, + "type_info": "Text" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false, + true, + true, + true + ], + "parameters": { + "Left": [ + "Int4Array", + "Text" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $2\n WHERE id = (\n SELECT id\n FROM node_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY l1_batch_number ASC, depth ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs_fri.*\n " + }, "01a21fe42c5c0ec0f848739235b8175b62b0ffe503b823c128dd620fec047784": { "describe": { "columns": [], 
@@ -126,92 +244,33 @@ }, "query": "\n WITH events_select AS (\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE miniblock_number > $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n )\n SELECT miniblocks.hash as \"block_hash?\",\n address as \"address!\", topic1 as \"topic1!\", topic2 as \"topic2!\", topic3 as \"topic3!\", topic4 as \"topic4!\", value as \"value!\",\n miniblock_number as \"miniblock_number!\", miniblocks.l1_batch_number as \"l1_batch_number?\", tx_hash as \"tx_hash!\",\n tx_index_in_block as \"tx_index_in_block!\", event_index_in_block as \"event_index_in_block!\", event_index_in_tx as \"event_index_in_tx!\"\n FROM events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "073d304fe756940303f00b514ef1e24036a1d3d3c3c7fb204b484f681a3520d7": { + "073582051133075adfc51a18d15639129dd00628aa4994b602843ac979ad4419": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4", + "Int8", + "Bytea", + "Text", "Int4" ] } }, - "query": "UPDATE eth_txs\n SET confirmed_eth_tx_history_id = $1\n WHERE id = $2" + "query": "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, protocol_version, created_at, updated_at) VALUES ($1, $2, $3, 'queued', $4, now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" }, - "078adbe9a9973c96c8911725c2b2ce449f83897b324c434b04ffe4d1dd40484c": { + "073d304fe756940303f00b514ef1e24036a1d3d3c3c7fb204b484f681a3520d7": { "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "merkle_tree_paths_blob_url", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" - }, - { - "name": "status", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 5, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "processing_started_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 8, - "type_info": "Time" - }, - { - "name": "is_blob_cleaned", - "ordinal": 9, - "type_info": "Bool" - } - ], - "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - true, - true - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int4", + "Int4" ] } }, - "query": "\n UPDATE witness_inputs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs_fri\n WHERE l1_batch_number <= $1\n AND status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs_fri.*\n " + "query": "UPDATE eth_txs\n SET confirmed_eth_tx_history_id = $1\n WHERE id = $2" }, "0c212f47b9a0e719f947a419be8284837b1b01aa23994ba6401b420790b802b8": { "describe": { @@ -593,117 +652,25 @@ }, "query": "SELECT tx_hash FROM eth_txs_history\n WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL" }, - "142c812f70d8c0cef986bef9b3c058e148f2cfb1c2c933ff321cf498b9c6e3b2": { + "14815f61d37d274f9aea1125ca4d368fd8c45098b0017710c0ee18d23d994c15": { "describe": { "columns": [ { "name": "number", "ordinal": 0, 
"type_info": "Int8" - }, - { - "name": "l1_batch_number!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "last_batch_miniblock?", - "ordinal": 2, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 3, - "type_info": "Int8" - }, - { - "name": "root_hash?", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "commit_tx_hash?", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "committed_at?", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "prove_tx_hash?", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "proven_at?", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "execute_tx_hash?", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "executed_at?", - "ordinal": 10, - "type_info": "Timestamp" - }, - { - "name": "l1_gas_price", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 12, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 13, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 14, - "type_info": "Bytea" - }, - { - "name": "fee_account_address?", - "ordinal": 15, - "type_info": "Bytea" } ], "nullable": [ - false, - null, - null, - false, - false, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true, false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, - "14815f61d37d274f9aea1125ca4d368fd8c45098b0017710c0ee18d23d994c15": { + "15135331e56e3e4e3eeae3aac609d8e8c7146d190dfe26c1a24f92d21cd34858": { "describe": { "columns": [ { @@ -715,34 +682,13 @@ "nullable": [ false ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" - }, - "14fb05ec0acec5a4f24752c60768d72bf19d9953468dc691e3f3b8519e6d3ada": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - 
"Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4" + "Int8" ] } }, - "query": "INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, protocol_version, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, now(), now())" + "query": "SELECT number from miniblocks where timestamp > $1 ORDER BY number ASC LIMIT 1" }, "151aa7cab859c275f74f981ed146415e1e5242ebe259552d5b9fac333c0d9ce8": { "describe": { @@ -1196,6 +1142,19 @@ }, "query": "\n SELECT COUNT(*) as \"count!\", circuit_id as \"circuit_id!\", aggregation_round as \"aggregation_round!\", status as \"status!\"\n FROM prover_jobs_fri\n GROUP BY circuit_id, aggregation_round, status\n " }, + "1ed353a16e8d0abaf426e5c235b20a79c727c08bc23fb1708a833a6930131691": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + } + }, + "query": "INSERT INTO proof_compression_jobs_fri(l1_batch_number, status, created_at, updated_at) VALUES ($1, $2, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" + }, "1eede5c2169aee5a767b3b6b829f53721c0c353956ccec31a75226a65325ae46": { "describe": { "columns": [], @@ -1520,6 +1479,20 @@ }, "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true AND miniblock_number IS NOT NULL" }, + "22e50b6def0365ddf979b64c3c943e2a3f8e5a1abcf72e61a00a82780d2d364e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Text" + ] + } + }, + "query": "INSERT INTO proof_compression_jobs_fri(l1_batch_number, fri_proof_blob_url, status, created_at, updated_at) VALUES ($1, $2, $3, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" + }, "2397c1a050d358b596c9881c379bf823e267c03172f72c42da84cc0c04cc9d93": { "describe": { "columns": [ @@ -1558,6 +1531,20 @@ }, "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " }, + "23c154c243f27912320ea0d68bc7bb372517010fb8c5737621cadd7b408afe8d": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Int8" + ] + } + }, + "query": "UPDATE proof_compression_jobs_fri SET status =$1, error= $2, updated_at = now() WHERE l1_batch_number = $3" + }, "2424f0ab2b156e953841107cfc0ccd76519d13c62fdcd5fd6b39e3503d6ec82c": { "describe": { "columns": [], @@ -1643,55 +1630,193 @@ } ], "nullable": [ - false, - false, - false + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT hash, number, timestamp FROM miniblocks WHERE number > $1 ORDER BY number ASC" + }, + "26ac14152ade97892cd78d37884523187a5619093887b5e6564c3a80741b9d94": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "recursion_scheduler_level_vk_hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "recursion_node_level_vk_hash", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "recursion_leaf_level_vk_hash", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "recursion_circuits_set_vks_hash", + "ordinal": 5, + "type_info": "Bytea" + 
}, + { + "name": "bootloader_code_hash", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "default_account_code_hash", + "ordinal": 7, + "type_info": "Bytea" + }, + { + "name": "verifier_address", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "upgrade_tx_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "created_at", + "ordinal": 10, + "type_info": "Timestamp" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false + ], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT * FROM protocol_versions WHERE id = $1" + }, + "297d6517ec5f050e8d8fe4878e4ff330b4b10af4d60de86e8a25e2cd70e0363b": { + "describe": { + "columns": [ + { + "name": "verification_info", + "ordinal": 0, + "type_info": "Jsonb" + } + ], + "nullable": [ + true ], "parameters": { "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT verification_info FROM contracts_verification_info WHERE address = $1" + }, + "2985ea2bf34a94573103654c00a49d2a946afe5d552ac1c2a2d055eb9d6f2cf1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", "Int8" ] } }, - "query": "SELECT hash, number, timestamp FROM miniblocks WHERE number > $1 ORDER BY number ASC" + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " }, - "2928cd054e9d6898559f964906a2ee0d3750fbe6fbd99209a48fc7b197fa2a22": { + "2a38561e789af470d6ef1a905143f2d8d102b4ff23cebe97586681da9e4084a9": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "timestamp", "ordinal": 1, "type_info": "Int8" }, { - "name": "circuit_id", + "name": "hash", "ordinal": 2, - "type_info": "Int2" + "type_info": "Bytea" }, { - "name": "aggregation_round", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Int2" + "type_info": "Int4" }, { - "name": "sequence_number", + "name": "l2_tx_count", "ordinal": 4, "type_info": "Int4" }, { - "name": "depth", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "is_node_final_proof", + "name": "l1_gas_price", "ordinal": 6, - "type_info": "Bool" + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "virtual_blocks", + "ordinal": 11, + "type_info": "Int8" } ], "nullable": [ @@ -1701,49 +1826,41 @@ false, false, false, + false, + false, + true, + true, + true, false ], "parameters": { "Left": [ - "Int2Array", - "Int2Array" + "Int8" ] } }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND (circuit_id, aggregation_round) IN (\n SELECT * FROM UNNEST($1::smallint[], $2::smallint[])\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + "query": 
"SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, virtual_blocks\n FROM miniblocks WHERE number = $1" }, - "297d6517ec5f050e8d8fe4878e4ff330b4b10af4d60de86e8a25e2cd70e0363b": { + "2a98f1b149045f25d2830c0b4ffaaa400b4c572eb3842add22e8540f44943711": { "describe": { "columns": [ { - "name": "verification_info", + "name": "id", "ordinal": 0, - "type_info": "Jsonb" + "type_info": "Int8" } ], "nullable": [ - true + false ], "parameters": { "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT verification_info FROM contracts_verification_info WHERE address = $1" - }, - "2985ea2bf34a94573103654c00a49d2a946afe5d552ac1c2a2d055eb9d6f2cf1": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Time", - "Int8" + "Int8", + "Int2" ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " + "query": "SELECT id from prover_jobs_fri WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2" }, "2adfdba6fa2b6b967ba03ae6f930e7f3ea851f678d30df699ced27b2dbb01c2a": { "describe": { @@ -2075,18 +2192,18 @@ }, "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n AND tx_format != $4\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery1\n ORDER BY hash\n ) as subquery2\n WHERE transactions.hash = subquery2.hash\n RETURNING transactions.*" }, - "2dbadf3ff3134bc35bc98cf7201097256aed32b75d3809d7d24c95f70672e21c": { + "2e3f116ca05ae70b7c83ac550302194c91f57b69902ff8e42140fde732ae5e6a": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray", - "Int8Array" + "Int8", + "Int4Array" ] } }, - "query": "UPDATE initial_writes SET index = data_table.index FROM ( SELECT UNNEST($1::bytea[]) as hashed_key, UNNEST($2::bigint[]) as index ) as data_table WHERE initial_writes.hashed_key = data_table.hashed_key" + "query": "DELETE FROM storage_logs WHERE miniblock_number = $1 AND operation_number != ALL($2)" }, "2e543dc0013150040bb86e278bbe86765ce1ebad72a32bb931fe02a9c516a11c": { "describe": { @@ -2370,6 +2487,23 @@ }, "query": "SELECT * FROM transactions WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL ORDER BY miniblock_number, index_in_block" }, + "3055b9f38a04f26dac9adbba978679e6877f44c758fd03461e940a8f9a4e5af1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Int4", + "Text", + "Int4", + "Int4" + ] + } + }, + "query": "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id, depth)\n DO UPDATE SET updated_at=now()" + }, "334197fef9eeca55790d366ae67bbe95d77181bdfd2ad3208a32bd50585aef2d": { "describe": { "columns": [ @@ -2408,6 +2542,134 @@ }, "query": "SELECT MAX(number) as \"number\" FROM miniblocks" }, + "3365f652e8e0070672ab522bd60f92d002dac7bb782763575a0337a8b5502994": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": 
"l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "last_batch_miniblock?", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 13, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "virtual_blocks", + "ordinal": 15, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 16, + "type_info": "Bytea" + }, + { + "name": "protocol_version!", + "ordinal": 17, + "type_info": "Int4" + }, + { + "name": "fee_account_address?", + "ordinal": 18, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + null, + false, + false, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + false, + false, + true, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version as \"protocol_version!\",\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + }, "357347157ed8ff19d223c54533c3a85bd7e64a37514d657f8d49bd6eb5be1806": { "describe": { "columns": [ @@ -2506,17 +2768,35 @@ } ], "nullable": [ - false, - true, - true + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND 
aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + }, + "37e4a0eea7b72bd3b75c26e003f3fa62039d9b614f0f2fa3d61e8c5e95f002fd": { + "describe": { + "columns": [ + { + "name": "max?", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND leaf_layer_subqueues_blob_url is NOT NULL\n AND aggregation_outputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "SELECT MAX(index) as \"max?\" FROM initial_writes" }, "38a3bdae346fdd362452af152c6886c93696dd2db561f6622f8eaf6fabb1e5be": { "describe": { @@ -2598,6 +2878,94 @@ }, "query": "\n SELECT * FROM call_traces\n WHERE tx_hash = $1\n " }, + "3a18d0d1e236d8f57e8b3b1218a24414639a7c8235ba6a514c3d03b8a1790f17": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "merkle_tree_paths_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "status", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "error", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 5, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "processing_started_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 8, + "type_info": "Time" + }, + { + "name": "is_blob_cleaned", + "ordinal": 9, + "type_info": "Bool" + }, + { + "name": "protocol_version", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "picked_by", + "ordinal": 11, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + false, + false, + true, + false, + false, + true, + true, + true, + true, + true + ], + "parameters": { + "Left": [ + "Int8", + "Int4Array", + "Text" + ] + } + }, + "query": "\n UPDATE witness_inputs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $3\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs_fri\n WHERE l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = ANY($2)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs_fri.*\n " + }, "3ac1fe562e9664bbf8c02ba3090cf97a37663e228eff48fec326f74b2313daa9": { "describe": { "columns": [], @@ -2782,6 +3150,33 @@ }, "query": "UPDATE storage SET value = u.value FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) WHERE u.key = hashed_key" }, + "400bb5f012b95f5b327a65bf8a55e61a9e41a8040f546d75b9b8aa6be45e78d5": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int4", + "Int8" + ] + } + }, + "query": "SELECT number, hash FROM miniblocks WHERE protocol_version = $1 ORDER BY number DESC LIMIT $2" + }, "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { "describe": { "columns": [], @@ -3034,6 +3429,26 @@ }, "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = 
$3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " }, + "43b5082ff7673ee3a8e8f3fafa64667fac4f7f5c8bd26a21ead6b4ba0f8fd17b": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT hash FROM miniblocks WHERE number = $1" + }, "448d283cab6ae334de9676f69416974656d11563b58e0188d53ca9e0995dd287": { "describe": { "columns": [], @@ -3046,6 +3461,16 @@ }, "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queued'\n WHERE l1_batch_number = ANY($1)\n " }, + "4588d998b3454d8210190c6b16116b5885f6f3e74606aec8250e6c1e8f55d242": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "VACUUM storage_logs" + }, "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { "columns": [ @@ -3110,25 +3535,21 @@ }, "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" }, - "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { + "4aef34fb19a07dbfe2be09024d6c7fc2033a8e1570cc7f002a5c78317ff8ff3f": { "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int8", + "Int2", + "Text", + "Int4", + "Int4" ] } }, - "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" + "query": "\n INSERT INTO leaf_aggregation_witness_jobs_fri\n (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id)\n DO UPDATE SET updated_at=now()\n " }, "4b8597a47c0724155ad9592dc32134523bcbca11c9d82763d1bebbe17479c7b4": { "describe": { @@ -3395,6 +3816,39 @@ }, "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " }, + "4fca2f4497b3b5040cb8ccefe44a29c2583578942fd7c58e71c0eaeb2d9bec9e": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int2" + ] + } + }, + "query": "UPDATE proof_compression_jobs_fri SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) OR (status = 'failed' AND attempts < $2) RETURNING l1_batch_number, status, attempts" + }, "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { "describe": { "columns": [], @@ -3433,24 +3887,6 @@ }, "query": "SELECT bootloader_code_hash, default_account_code_hash FROM protocol_versions\n WHERE id = $1\n 
" }, - "51d788b5e8d808db143b6c057485f0a0b314a0c33e3eb2dff99ca0b32d12f8e4": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text", - "Int2", - "Int4", - "Int4", - "Bool" - ] - } - }, - "query": "\n INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number)\n DO UPDATE SET updated_at=now()\n " - }, "52eeb8c529efb796fdefb30a381fcf6c931512f30e55e24c155f6c649e662909": { "describe": { "columns": [ @@ -3469,19 +3905,6 @@ }, "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queuing'\n WHERE l1_batch_number IN\n (SELECT l1_batch_number FROM scheduler_dependency_tracker_fri\n WHERE status != 'queued'\n AND circuit_1_final_prover_job_id IS NOT NULL\n AND circuit_2_final_prover_job_id IS NOT NULL\n AND circuit_3_final_prover_job_id IS NOT NULL\n AND circuit_4_final_prover_job_id IS NOT NULL\n AND circuit_5_final_prover_job_id IS NOT NULL\n AND circuit_6_final_prover_job_id IS NOT NULL\n AND circuit_7_final_prover_job_id IS NOT NULL\n AND circuit_8_final_prover_job_id IS NOT NULL\n AND circuit_9_final_prover_job_id IS NOT NULL\n AND circuit_10_final_prover_job_id IS NOT NULL\n AND circuit_11_final_prover_job_id IS NOT NULL\n AND circuit_12_final_prover_job_id IS NOT NULL\n AND circuit_13_final_prover_job_id IS NOT NULL\n )\n RETURNING l1_batch_number;\n " }, - "53726a35b24a838df04c1f7201da322aab287830c96fc2c712a67d360bbc2bd0": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - } - }, - "query": "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" - }, "5490012051be6faaaa11fad0f196eb53160a9c5c045fe9d66afcef7f33403fe2": { "describe": { "columns": [ @@ -3618,6 +4041,18 @@ }, "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " }, + "565a302151a5a55aa717048e3e21b5d7379ab47c2b80229024f0cb2699136b11": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "UPDATE miniblocks SET protocol_version = $1 WHERE l1_batch_number IS NULL" + }, "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { "describe": { "columns": [], @@ -3668,20 +4103,41 @@ "nullable": [], "parameters": { "Left": [ - "Int4", - "Int4" + "Int4", + "Int4" + ] + } + }, + "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" + }, + "58489a4e8730646ce20efee849742444740c72f59fad2495647742417ed0ab5a": { + "describe": { + "columns": [ + { + "name": "base_fee_per_gas", + "ordinal": 0, + "type_info": "Numeric" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" ] } }, - "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" + "query": "SELECT base_fee_per_gas FROM miniblocks WHERE number <= $1 ORDER BY number DESC LIMIT $2" }, - "58489a4e8730646ce20efee849742444740c72f59fad2495647742417ed0ab5a": { + "58ae859333cf7fadbb83d9cde66dee2abe18b4883f883e69130024d11a4a5cc6": { "describe": { "columns": [ { - "name": "base_fee_per_gas", + "name": 
"number", "ordinal": 0, - "type_info": "Numeric" + "type_info": "Int8" } ], "nullable": [ @@ -3690,11 +4146,11 @@ "parameters": { "Left": [ "Int8", - "Int8" + "Numeric" ] } }, - "query": "SELECT base_fee_per_gas FROM miniblocks WHERE number <= $1 ORDER BY number DESC LIMIT $2" + "query": "SELECT number FROM ( SELECT number, sum(virtual_blocks) OVER(ORDER BY number) AS virtual_block_sum FROM miniblocks WHERE l1_batch_number >= $1 ) AS vts WHERE virtual_block_sum <= $2 ORDER BY number DESC LIMIT 1" }, "5922fdf40632a6ffecfe824a3ba29bcf7b379aff5253db2739cc7be6145524e8": { "describe": { @@ -3744,7 +4200,7 @@ ], "nullable": [ false, - true + false ], "parameters": { "Left": [ @@ -3772,6 +4228,27 @@ }, "query": "SELECT MAX(l1_batch_number) FROM witness_inputs WHERE merkel_tree_paths_blob_url IS NOT NULL" }, + "5a31eab41a980cc82ad3609610d377a185ce38bd654ee93766c119aa6cae1040": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8", + "Numeric" + ] + } + }, + "query": "SELECT number FROM ( SELECT number, sum(virtual_blocks) OVER(ORDER BY number) AS virtual_block_sum FROM miniblocks WHERE l1_batch_number >= $1 ) AS vts WHERE virtual_block_sum >= $2 ORDER BY number LIMIT 1" + }, "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { "describe": { "columns": [ @@ -4091,6 +4568,19 @@ }, "query": "SELECT version FROM compiler_versions WHERE compiler = $1 ORDER by version" }, + "633765abd4635ee7acb24c13af55153de09e911ecdf70fcd9cd08c7c5c452d7a": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "TextArray", + "Text" + ] + } + }, + "query": "\n INSERT INTO compiler_versions (version, compiler, created_at, updated_at)\n SELECT u.version, $2, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n ON CONFLICT (version, compiler) DO NOTHING" + }, "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { "describe": { "columns": [ @@ -4123,6 +4613,65 @@ }, "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " }, + "654e133230ee435e95cfda5bc4d72c8dd26412fe9d364e218e95eb4fe64559e9": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" + }, + { + "name": "sequence_number", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Int2Array", + "Int2Array", + "Int4Array", + "Text" + ] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $4\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND (circuit_id, aggregation_round) IN (\n SELECT * FROM UNNEST($1::smallint[], $2::smallint[])\n )\n AND protocol_version = ANY($3)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n 
prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + }, "665112c83ed7f126f94d1c47408de3495ee6431970e334d94ae75f853496eb48": { "describe": { "columns": [], @@ -4265,6 +4814,29 @@ }, "query": "SELECT MAX(id) as \"max?\" FROM protocol_versions" }, + "6a3af113a71bffa445d4a729e24fbc2be90bfffbdd072c74f9ca58669b7e5f80": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "SELECT id FROM prover_fri_protocol_versions WHERE recursion_circuits_set_vks_hash = $1 AND recursion_leaf_level_vk_hash = $2 AND recursion_node_level_vk_hash = $3 AND recursion_scheduler_level_vk_hash = $4 " + }, "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { "describe": { "columns": [ @@ -4298,15 +4870,50 @@ }, "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " }, - "6ea2cd1c5df69ba8ab1ef635b64870587325219abbef188007747851e313b084": { + "6b53e5cb619c9649d28ae33df6a43e6984e2d9320f894f3d04156a2d1235bb60": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT hash FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number" + }, + "6c0915ed87e6d0fdf83cb24a51cc277e366bea0ba8821c048092d2a0aadb2771": { "describe": { "columns": [], "nullable": [], "parameters": { - "Left": [] + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Int4", + "Int8" + ] } }, - "query": "UPDATE initial_writes SET index = NULL" + "query": "INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, protocol_version, virtual_blocks, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, now(), now())" }, "6ffd22b0590341c38ce3957dccdb5a4edf47fb558bc64e4df08897a0c72dbf23": { "describe": { @@ -4714,30 +5321,6 @@ }, "query": "SELECT max(l1_batches.number) FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1" }, - "778dd9ef4d302f38f068aceabf3872c0325fbdb5cfc7c18feb5db3768d98564f": { - "describe": { - "columns": [ - { - "name": "index", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - } - ], - "nullable": [ - true, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT index, l1_batch_number FROM initial_writes WHERE index IS NOT NULL ORDER BY index DESC LIMIT 1" - }, "780b30e56a3ecfb3daa5310168ac6cd9e94bd5f1d871e1eaf36fbfd463a5e7e0": { 
"describe": { "columns": [ @@ -4846,20 +5429,19 @@ }, "query": "SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs WHERE status = 'successful' AND is_blob_cleaned = FALSE AND merkel_tree_paths_blob_url is NOT NULL AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1" }, - "7bbb3ba8c9860818d04bad46dee94f59d054619c961fd3d59d26fcb364598d5d": { + "7b8043a59029a19a3ba2433a438e8a4fe560aba7eda57b7a63b580de2e19aacb": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ "Int8", - "Int2", "Text", "Int4" ] } }, - "query": "\n INSERT INTO leaf_aggregation_witness_jobs_fri\n (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id)\n DO UPDATE SET updated_at=now()\n " + "query": "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, protocol_version, status, created_at, updated_at) VALUES ($1, $2, $3, 'queued', now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" }, "7c3e55a10c8cf90e60001bca401113fd5335ec6c4b1ffdb6d6ff063d244d23e2": { "describe": { @@ -5153,95 +5735,32 @@ }, "query": "SELECT miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\", shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value FROM l2_to_l1_logs WHERE tx_hash = $1 ORDER BY log_index_in_tx ASC" }, - "84b6ac6bc44503de193e0e4e1201ffd200eddf690722659dad6ddea0604427dc": { + "84703029e09ab1362aa4b4177b38be594d2daf17e69508cae869647028055efb": { "describe": { "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, { "name": "l1_batch_number", - "ordinal": 1, + "ordinal": 0, "type_info": "Int8" }, - { - "name": "circuit_id", - "ordinal": 2, - "type_info": "Int2" - }, - { - "name": "depth", - "ordinal": 3, - "type_info": "Int4" - }, { "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 5, - "type_info": "Int2" - }, - { - "name": "aggregations_url", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 8, - "type_info": "Time" - }, - { - "name": "error", - "ordinal": 9, + "ordinal": 1, "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 10, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 11, - "type_info": "Timestamp" - }, - { - "name": "number_of_dependent_jobs", - "ordinal": 12, - "type_info": "Int4" } ], "nullable": [ false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true + false ], "parameters": { - "Left": [] + "Left": [ + "Text", + "Text" + ] } }, - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM node_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, depth ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs_fri.*\n " + "query": "SELECT l1_batch_number, status FROM proof_compression_jobs_fri\n WHERE l1_batch_number = ( SELECT MIN(l1_batch_number) FROM proof_compression_jobs_fri WHERE status = $1 OR status = $2\n )" }, "852b8d72a8dcbf620e528e983b836b2b05596eb0b7c5d7d1791080bef6a6b821": { "describe": { @@ 
-5678,72 +6197,6 @@ }, "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" }, - "8d48fb84bd08f6103fe28d13331f4e3422b61adab6037e8760b0ca7b1a48907e": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "scheduler_partial_input_blob_url", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "status", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 3, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 4, - "type_info": "Time" - }, - { - "name": "error", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "attempts", - "ordinal": 8, - "type_info": "Int2" - } - ], - "nullable": [ - false, - false, - false, - true, - true, - true, - false, - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs_fri.*\n " - }, "8dcbaaa6186da52ca8b440b6428826288dc668af5a6fc99ef3078c8bcb38c419": { "describe": { "columns": [ @@ -5774,95 +6227,17 @@ }, "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id, depth) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth\n FROM prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs\n HAVING COUNT(*) = nawj.number_of_dependent_jobs)\n RETURNING l1_batch_number, circuit_id, depth;\n " }, - "8de48960815f48f5d66e82b770a2e0caee42261643ec535a8f21cba1b5d4f50d": { + "8f75c5aa615080fc02b60baccae9c49a81e282a54864ea3eb874ebe10a23eafe": { "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "circuit_id", - "ordinal": 2, - "type_info": "Int2" - }, - { - "name": "closed_form_inputs_blob_url", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 4, - "type_info": "Int2" - }, - { - "name": "status", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 7, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "processing_started_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 10, - "type_info": "Time" - }, - { - "name": "is_blob_cleaned", - "ordinal": 11, - "type_info": "Bool" - }, - { - "name": "number_of_basic_circuits", - "ordinal": 12, - "type_info": "Int4" - } - ], - "nullable": [ 
- false, - false, - false, - true, - false, - false, - true, - false, - false, - true, - true, - true, - true - ], + "columns": [], + "nullable": [], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM leaf_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs_fri.*\n " + "query": "UPDATE prover_jobs_fri SET status = 'sent_to_server', updated_at = now() WHERE l1_batch_number = $1" }, "8fa1a390d7b11b60b3352fafc0a8a7fa15bc761b1bb902f5105fd66b2e3087f2": { "describe": { @@ -5945,22 +6320,6 @@ }, "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" }, - "908f10640f805957e3f77ed685a7170345d835166e1857c12d76c15b09dffff5": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Int4", - "Text", - "Int4" - ] - } - }, - "query": "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id, depth)\n DO UPDATE SET updated_at=now()" - }, "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { "describe": { "columns": [ @@ -6204,130 +6563,43 @@ "type_info": "Time" }, { - "name": "aggregation_round", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "result", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "sequence_number", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "attempts", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "circuit_input_blob_url", - "ordinal": 14, - "type_info": "Text" - }, - { - "name": "proccesed_by", - "ordinal": 15, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 16, - "type_info": "Bool" - }, - { - "name": "protocol_version", - "ordinal": 17, - "type_info": "Int4" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - true, - false, - false, - false, - false, - true, - false, - false, - true, - true, - false, - true - ], - "parameters": { - "Left": [ - "Int4Array" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " - }, - "997229f1dd293ccc0379ffb3df49d69a4ea145b5d263eea53b7abc635dd53cc6": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "l1_tx_count", - "ordinal": 3, + "name": "aggregation_round", + "ordinal": 10, "type_info": "Int4" }, { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" + "name": "result", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": 
"base_fee_per_gas", - "ordinal": 5, - "type_info": "Numeric" + "name": "sequence_number", + "ordinal": 12, + "type_info": "Int4" }, { - "name": "l1_gas_price", - "ordinal": 6, - "type_info": "Int8" + "name": "attempts", + "ordinal": 13, + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", - "ordinal": 7, - "type_info": "Int8" + "name": "circuit_input_blob_url", + "ordinal": 14, + "type_info": "Text" }, { - "name": "bootloader_code_hash", - "ordinal": 8, - "type_info": "Bytea" + "name": "proccesed_by", + "ordinal": 15, + "type_info": "Text" }, { - "name": "default_aa_code_hash", - "ordinal": 9, - "type_info": "Bytea" + "name": "is_blob_cleaned", + "ordinal": 16, + "type_info": "Bool" }, { "name": "protocol_version", - "ordinal": 10, + "ordinal": 17, "type_info": "Int4" } ], @@ -6337,18 +6609,27 @@ false, false, false, + true, + true, + false, + false, false, false, + true, + false, false, true, true, + false, true ], "parameters": { - "Left": [] + "Left": [ + "Int4Array" + ] } }, - "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM miniblocks ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, "99d331d233d357302ab0cc7e3269ef9e414f0c3111785212660f471e3b4f6a04": { "describe": { @@ -6498,6 +6779,25 @@ }, "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, updated_at = now() WHERE number = $14 AND hash IS NULL" }, + "9d28c1be3bda0c4fb37567d4a56730e801f48fbb2abad42ea894ebd8ee40412d": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text", + "Int2", + "Int4", + "Int4", + "Bool", + "Int4" + ] + } + }, + "query": "\n INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number)\n DO UPDATE SET updated_at=now()\n " + }, "9feee3fd267dc4e58185aeae7cab798c03eefa69470e4b98716615cecf6c012a": { "describe": { "columns": [ @@ -6572,6 +6872,111 @@ }, "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n " }, + "a074cd2c23434a8e801c2c0b42e63f1657765aceabd6d8a50ef2d2299bba99ab": { + "describe": 
{ + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "closed_form_inputs_blob_url", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 4, + "type_info": "Int2" + }, + { + "name": "status", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "error", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "processing_started_at", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 10, + "type_info": "Time" + }, + { + "name": "is_blob_cleaned", + "ordinal": 11, + "type_info": "Bool" + }, + { + "name": "number_of_basic_circuits", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "protocol_version", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "picked_by", + "ordinal": 14, + "type_info": "Text" + } + ], + "nullable": [ + false, + false, + false, + true, + false, + false, + true, + false, + false, + true, + true, + true, + true, + true, + true + ], + "parameters": { + "Left": [ + "Int4Array", + "Text" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $2\n WHERE id = (\n SELECT id\n FROM leaf_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs_fri.*\n " + }, "a19b7137403c5cdf1be5f5122ce4d297ed661fa8bdb3bc91f8a81fe9da47469e": { "describe": { "columns": [ @@ -6592,6 +6997,22 @@ }, "query": "\n SELECT upgrade_tx_hash FROM protocol_versions\n WHERE id = $1\n " }, + "a1a6b52403c1db35c8d83d0a512ac453ecd54b34ec516027d540ee1890b40291": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, created_at) VALUES ($1, $2, $3, $4, $5, now()) ON CONFLICT(id) DO NOTHING" + }, "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { "columns": [ @@ -6737,28 +7158,38 @@ }, "query": "SELECT transactions.hash, transactions.received_at FROM transactions LEFT JOIN miniblocks ON miniblocks.number = miniblock_number WHERE received_at > $1 ORDER BY received_at ASC LIMIT $2" }, - "a5f23ec9759a7d8bc02125a67d6139bb885cc80225519346d4c7ecfe45c59704": { + "a5115658f3a53462a9570fd6676f1931604d1c17a9a2b5f1475519006aaf03ba": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + } + }, + "query": "INSERT INTO proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) VALUES ($1, 'ready_to_be_proven', $2, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" + }, + "a7abde5a53248d6e63aa998acac521194231bbe08140c9c4efa548c4f3ae17fa": { "describe": { "columns": [ { - "name": "id", + "name": "max?", "ordinal": 0, "type_info": "Int4" } ], "nullable": [ - false + null ], "parameters": { "Left": [ - "Bytea", - "Bytea", - 
"Bytea", - "Bytea" + "Int8" ] } }, - "query": "\n SELECT id\n FROM protocol_versions\n WHERE recursion_circuits_set_vks_hash = $1\n AND recursion_leaf_level_vk_hash = $2\n AND recursion_node_level_vk_hash = $3\n AND recursion_scheduler_level_vk_hash = $4\n " + "query": "SELECT MAX(operation_number) as \"max?\" FROM storage_logs WHERE miniblock_number = $1" }, "a8878258bac2876686f1218213457edd70652e8145743b6b44a846220829bbe2": { "describe": { @@ -7143,6 +7574,21 @@ }, "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " }, + "aa279ce3351b30788711be6c65cb99cb14304ac38f8fed6d332237ffafc7c86b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Time", + "Text", + "Int8" + ] + } + }, + "query": "UPDATE proof_compression_jobs_fri SET status = $1, updated_at = now(), time_taken = $2, l1_proof_blob_url = $3WHERE l1_batch_number = $4" + }, "aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { "describe": { "columns": [ @@ -7474,26 +7920,18 @@ }, "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " }, - "b18718ad56a6b543df407d4cc5094ff4b1f26a407b07b97c3eee2b2fbf787c76": { + "b14d9a82e6b0a4174dde61642d3abc001cd8cb80d988eb81a685255e3ce920de": { "describe": { - "columns": [ - { - "name": "timestamp", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8", - "Int8" + "Int8Array", + "ByteaArray" ] } }, - "query": "SELECT timestamp FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number" + "query": "UPDATE miniblocks SET hash = u.hash FROM UNNEST($1::bigint[], $2::bytea[]) AS u(number, hash) WHERE miniblocks.number = u.number\n " }, "b479b7d3334f8d4566c294a44e2adb282fbc66a87be5c248c65211c2a8a07db0": { "describe": { @@ -7573,28 +8011,7 @@ "Left": [] } }, - "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" - }, - "b58a33c9c056b58e597a888fbaacb309520dff728ea65f8b7f756ca185f4ae57": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea" - ] - } - }, - "query": "INSERT INTO protocol_versions\n (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash,\n recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash,\n default_account_code_hash, verifier_address, upgrade_tx_hash, created_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now())\n " + "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" }, "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { "describe": { @@ -7680,6 +8097,24 @@ }, "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" }, + "b6f9874059c57e5e59f3021936437e9ff71a68065dfc19c295d806d7a9aafc93": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO prover_protocol_versions\n (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash,\n recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, verifier_address, created_at)\n VALUES ($1, 
$2, $3, $4, $5, $6, $7, now())\n " + }, "b79f02c8663c6b99d0aa46b430de32103afa0333e8293cf8661cfc1c3f9fc12e": { "describe": { "columns": [ @@ -8062,85 +8497,27 @@ }, "query": "UPDATE proof_generation_details SET status = 'picked_by_prover', updated_at = now(), prover_taken_at = now() WHERE l1_batch_number = ( SELECT l1_batch_number FROM proof_generation_details WHERE status = 'ready_to_be_proven' OR (status = 'picked_by_prover' AND prover_taken_at < now() - $1::interval) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE SKIP LOCKED ) RETURNING proof_generation_details.l1_batch_number" }, - "bc360f5148a0a8ddb2475068b651781873f757cf46035e0f05cf420f34c738c6": { + "bc4433cdfa499830fe6a6a95759c9fbe343ac25b371c7fa980bfd1b0afc86629": { "describe": { "columns": [ { - "name": "number", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "base_fee_per_gas", - "ordinal": 5, - "type_info": "Numeric" - }, - { - "name": "l1_gas_price", - "ordinal": 6, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 7, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "protocol_version", - "ordinal": 10, - "type_info": "Int4" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true + false ], "parameters": { "Left": [ - "Int8" + "Text", + "Text", + "Text" ] } }, - "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version FROM miniblocks WHERE number = $1" + "query": "UPDATE proof_compression_jobs_fri SET status = $1, attempts = attempts + 1, updated_at = now(), processing_started_at = now(), picked_by = $3 WHERE l1_batch_number = ( SELECT l1_batch_number FROM proof_compression_jobs_fri WHERE status = $2 ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE SKIP LOCKED ) RETURNING proof_compression_jobs_fri.l1_batch_number" }, "be824de76050461afe29dfd229e524bdf113eab3ca24208782c200531db1c940": { "describe": { @@ -8191,18 +8568,106 @@ }, "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" }, - "c115b25ea0d6b33331d1737cbc4e37ed44c466782d25f3d9c5519dd886f103ee": { + "c178e1574d2a16cb90bcc5d5333a4f8dd2a69e0c12b4e7e108a8dcc6000669a5": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "protocol_version", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], "parameters": { "Left": [ - "TextArray", + "Int8" + ] + } + }, + "query": "SELECT protocol_version FROM miniblocks WHERE number = $1" + }, + "c1e5f85be88ef0b6ab81daf8dec2011797086a7ec5aeaffe5665ebf9584bf84a": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "scheduler_partial_input_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "status", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 4, + "type_info": "Time" + }, + { + 
"name": "error", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "attempts", + "ordinal": 8, + "type_info": "Int2" + }, + { + "name": "protocol_version", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "picked_by", + "ordinal": 10, + "type_info": "Text" + } + ], + "nullable": [ + false, + false, + false, + true, + true, + true, + false, + false, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int4Array", "Text" ] } }, - "query": "\n INSERT INTO compiler_versions (version, compiler, created_at, updated_at)\n SELECT u.version, $2, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)" + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $2\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs_fri\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs_fri.*\n " }, "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { "describe": { @@ -8516,6 +8981,27 @@ }, "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" }, + "cc20350af9e837ae6b6160be65f88e6b675f62e207252f91f2ce7dcaaddb12b1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO protocol_versions (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash, default_account_code_hash, verifier_address, upgrade_tx_hash, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now())" + }, "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { "describe": { "columns": [], @@ -8567,30 +9053,87 @@ } ], "nullable": [ - null + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" + }, + "d11ff84327058721c3c36bc3371c3139f41e2a2255f64bbc5108c1876848d8bb": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Int4", + "Int4", + "Text", + "Text" + ] + } + }, + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n AND region = $5\n AND zone = $6\n " + }, + "d12724ae2bda6214b68e19dc290281907383926abf5ad471eef89529908b2673": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" + 
}, + { + "name": "sequence_number", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches" - }, - "d11ff84327058721c3c36bc3371c3139f41e2a2255f64bbc5108c1876848d8bb": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Text", - "Text", - "Int4", - "Int4", - "Text", + "Int4Array", "Text" ] } }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = $1, updated_at = now(), queue_free_slots = $4\n WHERE instance_host = $2::text::inet\n AND instance_port = $3\n AND region = $5\n AND zone = $6\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $2\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND protocol_version = ANY($1)\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, "d5dea31f2a325bb44e8ef2cbbabbeb73fd6996a3e6cb99d62c6b97a4aa49c1ca": { "describe": { @@ -8832,6 +9375,29 @@ }, "query": "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3,\n token_list_decimals = $4, well_known = true, updated_at = now()\n WHERE l1_address = $1\n " }, + "d7060880fe56fd99af7b7ed3f4c7fb9d0858cee30f44c5197821aae83c6c9666": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT id\n FROM prover_protocol_versions\n WHERE recursion_circuits_set_vks_hash = $1\n AND recursion_leaf_level_vk_hash = $2\n AND recursion_node_level_vk_hash = $3\n AND recursion_scheduler_level_vk_hash = $4\n " + }, "d8515595d34dca53e50bbd4ed396f6208e33f596195a5ed02fba9e8364ceb33c": { "describe": { "columns": [ @@ -9034,6 +9600,26 @@ }, "query": "UPDATE gpu_prover_queue_fri SET instance_status = $1, updated_at = now() WHERE instance_host = $2::text::inet AND instance_port = $3 AND zone = $4\n " }, + "de960625b0fa0b766aacab74473fcd0332a3f7dc356648452a6a63189a8b7cc3": { + "describe": { + "columns": [ + { + "name": "protocol_version", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT protocol_version FROM witness_inputs_fri WHERE l1_batch_number = $1" + }, "deaf3789ac968e299fe0e5a7f1c72494af8ecd664da9c901ec9c0c5e7c29bb65": { "describe": { "columns": [], @@ -9139,72 +9725,32 @@ }, "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING 
witness_inputs.*\n " }, - "e1235572a080ee86724da2ad5f528e27e6442ad47abd22e04af8efec2c59432b": { + "e05a8c74653afc78c892ddfd08e60ab040d2b2f7c4b5ee110988eac2dd0dd90d": { "describe": { "columns": [ { - "name": "id", + "name": "timestamp", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "virtual_blocks", "ordinal": 1, "type_info": "Int8" - }, - { - "name": "circuit_id", - "ordinal": 2, - "type_info": "Int2" - }, - { - "name": "aggregation_round", - "ordinal": 3, - "type_info": "Int2" - }, - { - "name": "sequence_number", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "depth", - "ordinal": 5, - "type_info": "Int4" - }, - { - "name": "is_node_final_proof", - "ordinal": 6, - "type_info": "Bool" } ], "nullable": [ - false, - false, - false, - false, - false, false, false ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, - "e1879cce18ad449d58f02254aa9ae4b115152484187161647d012df798985365": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ "Int8", - "Text" + "Int8" ] } }, - "query": "\n INSERT INTO scheduler_witness_jobs_fri\n (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " + "query": "SELECT timestamp, virtual_blocks FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number" }, "e1ad7a51afef6bd7a95df3294f64b7b1bdc4c4fc7ae5c4195802177986f3e876": { "describe": { @@ -9265,6 +9811,34 @@ }, "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" }, + "e409b39a5e62a3a4ec5d3b6aae4935c13b93129a22ffe6a0b68b5ade1f6082c8": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int8" + ] + } + }, + "query": "SELECT number, hash FROM miniblocks WHERE number >= $1 and protocol_version = $2 ORDER BY number LIMIT $3" + }, "e626aa2efb6ba875a12f2b4e37b0ba8052810e73fa5e2d3280f747f7b89b956f": { "describe": { "columns": [], @@ -9640,6 +10214,20 @@ }, "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" }, + "e9b03a0d79eb40a67eab9bdaac8447fc17922bea89bcc6a89eb8eadf147835fe": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4" + ] + } + }, + "query": "\n INSERT INTO scheduler_witness_jobs_fri\n (l1_batch_number, scheduler_partial_input_blob_url, protocol_version, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " + }, "ea17481cab38d370e06e7cf8598daa39faf4414152456aab89695e3133477d3e": { "describe": { "columns": [ @@ -10583,6 +11171,90 @@ }, "query": "SELECT trace FROM 
transaction_traces WHERE tx_hash = $1" }, + "fa177254ba516ad1588f4f6960be96706d1f43c23ff1d57ba2bc7bc7148bdcac": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "protocol_version", + "ordinal": 10, + "type_info": "Int4" + }, + { + "name": "virtual_blocks", + "ordinal": 11, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, virtual_blocks\n FROM miniblocks ORDER BY number DESC LIMIT 1" + }, "fa2b4316aaef09e96d93b70f96b129ed123951732e01d63f30b4b292d441ea39": { "describe": { "columns": [ @@ -10803,30 +11475,49 @@ }, "query": "SELECT * FROM eth_txs WHERE id = $1" }, - "fadc14334d48d2b29acd8433245e337655aeb3472c65922b7949ba84b32e9938": { + "fc52c356fd09d82da89a435d08398d9b773494491404b5c84fc14c1c1d374b59": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Bytea", - "Text" + "Int8" ] } }, - "query": "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, $3, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" + "query": "\n UPDATE contract_verification_requests\n SET status = 'successful', updated_at = now()\n WHERE id = $1\n " }, - "fc52c356fd09d82da89a435d08398d9b773494491404b5c84fc14c1c1d374b59": { + "ff7ff36b86b0e8d1cd7280aa447baef172cb054ffe7e1d742c59bf09b4f414cb": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM prover_protocol_versions WHERE id = $1" + }, + "ffc30c35b713dbde170c0369d5b9f741523778a3f396bd6fa9bfd1705fb4c8ac": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "Text", "Int8" ] } }, - "query": "\n UPDATE contract_verification_requests\n SET status = 'successful', updated_at = now()\n WHERE id = $1\n " + "query": "UPDATE proof_compression_jobs_fri SET status = $1, updated_at = now() WHERE l1_batch_number = $2" } } \ No newline at end of file diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index d8e928b28382..c0c79c034a68 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -35,6 +35,78 @@ impl BlocksDal<'_, '_> { count == 0 } + pub async fn get_miniblock_hashes_from_date( + &mut self, + timestamp: u64, + limit: u32, + version: ProtocolVersionId, + ) -> Vec<(MiniblockNumber, H256)> { + let number = sqlx::query!( + "SELECT number from 
miniblocks where timestamp > $1 ORDER BY number ASC LIMIT 1",
+            timestamp as i64
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap()
+        .number;
+        self.storage
+            .blocks_dal()
+            .get_miniblocks_since_block(number, limit, version)
+            .await
+    }
+
+    pub async fn get_last_miniblocks_for_version(
+        &mut self,
+        limit: u32,
+        version: ProtocolVersionId,
+    ) -> Vec<(MiniblockNumber, H256)> {
+        let miniblocks = sqlx::query!(
+            "SELECT number, hash FROM miniblocks WHERE protocol_version = $1 ORDER BY number DESC LIMIT $2",
+            version as i32,
+            limit as i32
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .iter()
+        .map(|block| {
+            (
+                MiniblockNumber(block.number as u32),
+                H256::from_slice(&block.hash),
+            )
+        })
+        .collect();
+
+        miniblocks
+    }
+
+    pub async fn get_miniblocks_since_block(
+        &mut self,
+        number: i64,
+        limit: u32,
+        version: ProtocolVersionId,
+    ) -> Vec<(MiniblockNumber, H256)> {
+        let miniblocks = sqlx::query!(
+            "SELECT number, hash FROM miniblocks WHERE number >= $1 and protocol_version = $2 ORDER BY number LIMIT $3",
+            number,
+            version as i32,
+            limit as i32
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .iter()
+        .map(|block| {
+            (
+                MiniblockNumber(block.number as u32),
+                H256::from_slice(&block.hash),
+            )
+        })
+        .collect();
+
+        miniblocks
+    }
+
     pub async fn get_sealed_l1_batch_number(&mut self) -> L1BatchNumber {
         let number = sqlx::query!(
             "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE"
@@ -286,8 +358,8 @@ impl BlocksDal<'_, '_> {
             number, timestamp, hash, l1_tx_count, l2_tx_count, \
             base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \
             bootloader_code_hash, default_aa_code_hash, protocol_version, \
-            created_at, updated_at \
-            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, now(), now())",
+            virtual_blocks, created_at, updated_at \
+            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, now(), now())",
             miniblock_header.number.0 as i64,
             miniblock_header.timestamp as i64,
             miniblock_header.hash.as_bytes(),
@@ -306,6 +378,28 @@ impl BlocksDal<'_, '_> {
                 .default_aa
                 .as_bytes(),
             miniblock_header.protocol_version.map(|v| v as i32),
+            miniblock_header.virtual_blocks as i64,
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn update_hashes(&mut self, number_and_hashes: &[(MiniblockNumber, H256)]) {
+        let mut numbers = vec![];
+        let mut hashes = vec![];
+        for (number, hash) in number_and_hashes {
+            numbers.push(number.0 as i64);
+            hashes.push(hash.as_bytes().to_vec());
+        }
+
+        sqlx::query!(
+            "UPDATE miniblocks SET hash = u.hash \
+            FROM UNNEST($1::bigint[], $2::bytea[]) AS u(number, hash) \
+            WHERE miniblocks.number = u.number
+            ",
+            &numbers,
+            &hashes
         )
         .execute(self.storage.conn())
         .await
@@ -317,7 +411,8 @@ impl BlocksDal<'_, '_> {
             StorageMiniblockHeader,
             "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \
                 base_fee_per_gas, l1_gas_price, l2_fair_gas_price, \
-                bootloader_code_hash, default_aa_code_hash, protocol_version \
+                bootloader_code_hash, default_aa_code_hash, protocol_version, \
+                virtual_blocks
             FROM miniblocks \
             ORDER BY number DESC \
             LIMIT 1",
@@ -336,7 +431,8 @@ impl BlocksDal<'_, '_> {
             StorageMiniblockHeader,
             "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \
                 base_fee_per_gas, l1_gas_price, l2_fair_gas_price, \
-                bootloader_code_hash, default_aa_code_hash, protocol_version \
+                bootloader_code_hash, default_aa_code_hash, protocol_version, \
+                virtual_blocks
             FROM miniblocks \
             WHERE number = $1",
             miniblock_number.0 as i64,
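The `update_hashes` helper above avoids issuing one `UPDATE` per miniblock: `UNNEST` zips the two bound arrays into rows of a virtual table `u(number, hash)`, and the `UPDATE` joins the target table against it, so any number of hashes is rewritten in a single round trip. A minimal sketch of the same pattern, using sqlx's runtime `query` API rather than the compile-time-checked `query!` macro (the standalone function and its name are illustrative):

    // Batch-update `hash` for many `miniblocks` rows in one statement.
    async fn batch_update_hashes(
        conn: &mut sqlx::PgConnection,
        numbers: Vec<i64>,    // miniblock numbers, bound as $1::bigint[]
        hashes: Vec<Vec<u8>>, // matching hashes, bound as $2::bytea[]
    ) -> sqlx::Result<()> {
        sqlx::query(
            "UPDATE miniblocks SET hash = u.hash \
             FROM UNNEST($1::bigint[], $2::bytea[]) AS u(number, hash) \
             WHERE miniblocks.number = u.number",
        )
        .bind(numbers)
        .bind(hashes)
        .execute(conn)
        .await?;
        Ok(())
    }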
@@ -431,7 +527,7 @@ impl BlocksDal<'_, '_> {
         .unwrap();

         if update_result.rows_affected() == 0 {
-            vlog::debug!(
+            tracing::debug!(
                 "L1 batch {} info wasn't updated. Details: root_hash: {:?}, merkle_root_hash: {:?}, \
                 parent_hash: {:?}, commitment: {:?}, l2_l1_merkle_root: {:?}",
                 number.0 as i64,
@@ -1112,16 +1208,28 @@ impl BlocksDal<'_, '_> {
         &mut self,
         l1_batch_number: L1BatchNumber,
     ) -> Option<ProtocolVersionId> {
-        {
-            let row = sqlx::query!(
-                "SELECT protocol_version FROM l1_batches WHERE number = $1",
-                l1_batch_number.0 as i64
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap()?;
-            row.protocol_version.map(|v| (v as u16).try_into().unwrap())
-        }
+        let row = sqlx::query!(
+            "SELECT protocol_version FROM l1_batches WHERE number = $1",
+            l1_batch_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()?;
+        row.protocol_version.map(|v| (v as u16).try_into().unwrap())
+    }
+
+    pub async fn get_miniblock_protocol_version_id(
+        &mut self,
+        miniblock_number: MiniblockNumber,
+    ) -> Option<ProtocolVersionId> {
+        let row = sqlx::query!(
+            "SELECT protocol_version FROM miniblocks WHERE number = $1",
+            miniblock_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()?;
+        row.protocol_version.map(|v| (v as u16).try_into().unwrap())
     }

     pub async fn get_miniblock_timestamp(
@@ -1137,6 +1245,17 @@
         .unwrap()
         .map(|row| row.timestamp as u64)
     }
+
+    pub async fn set_protocol_version_for_pending_miniblocks(&mut self, id: ProtocolVersionId) {
+        sqlx::query!(
+            "UPDATE miniblocks SET protocol_version = $1 \
+            WHERE l1_batch_number IS NULL",
+            id as i32,
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
 }

 /// These functions should only be used for tests.
@@ -1174,7 +1293,7 @@ mod tests {
         let mut conn = pool.access_storage().await;
         conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await;
         conn.protocol_versions_dal()
-            .save_protocol_version(ProtocolVersion::default())
+            .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;

         let mut header = L1BatchHeader::new(
@@ -1228,7 +1347,7 @@ mod tests {
         let mut conn = pool.access_storage().await;
         conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await;
         conn.protocol_versions_dal()
-            .save_protocol_version(ProtocolVersion::default())
+            .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
         let mut header = L1BatchHeader::new(
             L1BatchNumber(1),
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs
index 3c492065e02c..5e710e3d3344 100644
--- a/core/lib/dal/src/blocks_web3_dal.rs
+++ b/core/lib/dal/src/blocks_web3_dal.rs
@@ -8,7 +8,7 @@ use zksync_types::{
     l2_to_l1_log::L2ToL1Log,
     vm_trace::Call,
     web3::types::{BlockHeader, U64},
-    zk_evm::zkevm_opcode_defs::system_params,
+    zkevm_test_harness::zk_evm::zkevm_opcode_defs::system_params,
     Bytes, L1BatchNumber, L2ChainId, MiniblockNumber, H160, H2048, H256, U256,
 };
 use zksync_utils::bigdecimal_to_u256;
@@ -122,6 +122,7 @@ impl BlocksWeb3Dal<'_, '_> {
                 base_fee_per_gas: bigdecimal_to_u256(base_fee_per_gas),
                 timestamp: db_row.get::<i64, _>("timestamp").into(),
                 l1_batch_timestamp,
+                // TODO: include logs
                 ..api::Block::default()
             }
         });
@@ -146,18 +147,18 @@ impl BlocksWeb3Dal<'_, '_> {
     pub async fn get_block_tx_count(
         &mut self,
         block_id: api::BlockId,
-    ) -> Result<Option<U256>, SqlxError> {
+    ) -> Result<Option<(MiniblockNumber, U256)>, SqlxError> {
         let query = format!(
-            "SELECT l1_tx_count + l2_tx_count as tx_count FROM miniblocks WHERE {}",
+            "SELECT number, l1_tx_count + l2_tx_count AS tx_count FROM miniblocks WHERE {}",
             web3_block_where_sql(block_id, 1)
);
         let query = bind_block_where_sql_params(&block_id, sqlx::query(&query));
-        let tx_count: Option<i64> = query
-            .fetch_optional(self.storage.conn())
-            .await?
-            .map(|db_row| db_row.get("tx_count"));
-        Ok(tx_count.map(|t| (t as u32).into()))
+        Ok(query.fetch_optional(self.storage.conn()).await?.map(|row| {
+            let miniblock_number = row.get::<i64, _>("number") as u32;
+            let tx_count = row.get::<i64, _>("tx_count") as u32;
+            (MiniblockNumber(miniblock_number), tx_count.into())
+        }))
     }

     /// Returns hashes of blocks with numbers greater than `from_block` and the number of the last block.
@@ -210,6 +211,7 @@ impl BlocksWeb3Dal<'_, '_> {
             gas_limit: U256::zero(),
             base_fee_per_gas: None,
             extra_data: Bytes::default(),
+            // TODO: include logs
             logs_bloom: H2048::default(),
             timestamp: U256::from(row.timestamp),
             difficulty: U256::zero(),
@@ -247,13 +249,26 @@ impl BlocksWeb3Dal<'_, '_> {
         Ok(block_number)
     }

-    pub async fn get_block_timestamp(
+    /// Returns L1 batch timestamp for either sealed or pending L1 batch.
+    pub async fn get_expected_l1_batch_timestamp(
         &mut self,
-        block_number: MiniblockNumber,
+        l1_batch_number: L1BatchNumber,
     ) -> Result<Option<u64>, SqlxError> {
+        let first_miniblock_of_batch = if l1_batch_number.0 == 0 {
+            MiniblockNumber(0)
+        } else {
+            match self
+                .get_miniblock_range_of_l1_batch(l1_batch_number - 1)
+                .await?
+            {
+                Some((_, miniblock_number)) => miniblock_number + 1,
+                None => return Ok(None),
+            }
+        };
         let timestamp = sqlx::query!(
-            "SELECT timestamp FROM miniblocks WHERE number = $1",
-            block_number.0 as i64
+            "SELECT timestamp FROM miniblocks \
+            WHERE number = $1",
+            first_miniblock_of_batch.0 as i64
         )
         .fetch_optional(self.storage.conn())
         .await?
@@ -261,6 +276,20 @@ impl BlocksWeb3Dal<'_, '_> {
         Ok(timestamp)
     }

+    pub async fn get_miniblock_hash(
+        &mut self,
+        block_number: MiniblockNumber,
+    ) -> Result<Option<H256>, SqlxError> {
+        let hash = sqlx::query!(
+            "SELECT hash FROM miniblocks WHERE number = $1",
+            block_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await?
+        .map(|row| H256::from_slice(&row.hash));
+        Ok(hash)
+    }
+
     pub async fn get_l2_to_l1_logs(
         &mut self,
         block_number: L1BatchNumber,
@@ -466,13 +495,99 @@ impl BlocksWeb3Dal<'_, '_> {
             Ok(l1_batch_details.map(api::L1BatchDetails::from))
         }
     }
+
+    pub async fn get_miniblock_for_virtual_block_from(
+        &mut self,
+        migration_start_l1_batch_number: u64,
+        from_virtual_block_number: u64,
+    ) -> Result<Option<u32>, SqlxError> {
+        // Since virtual blocks are numbered starting from `migration_start_l1_batch_number` and not from 0,
+        // we have to subtract (migration_start_l1_batch_number - 1) from the `from` virtual block
+        // to find the miniblock using the query below.
+        let virtual_block_offset = from_virtual_block_number - migration_start_l1_batch_number + 1;
+
+        // In the query below, `virtual_block_sum` is effectively the latest virtual block number created
+        // within a miniblock, computed as the running sum of the virtual block counts of all miniblocks
+        // up to and including it.
+        // All logs are treated as if they were created in the last virtual block of the miniblock,
+        // which is why we are interested in finding it.
+        // The goal of this query is to find the first miniblock that contains the given virtual block.
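+        // For example: if the migration starts at L1 batch 1 and every miniblock from that point
+        // on mints 5 virtual blocks, the running sums are 5, 10, 15, ... For
+        // `from_virtual_block_number = 7` the offset is also 7, so the query resolves to the
+        // second of those miniblocks, the first one whose running sum reaches 7 (illustrative
+        // numbers; the test at the bottom of this file exercises the same logic).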
+        let record = sqlx::query!(
+            "SELECT number \
+            FROM ( \
+                SELECT number, sum(virtual_blocks) OVER(ORDER BY number) AS virtual_block_sum \
+                FROM miniblocks \
+                WHERE l1_batch_number >= $1 \
+            ) AS vts \
+            WHERE virtual_block_sum >= $2 \
+            ORDER BY number LIMIT 1",
+            migration_start_l1_batch_number as i64,
+            virtual_block_offset as i64
+        )
+        .instrument("get_miniblock_for_virtual_block_from")
+        .with_arg(
+            "migration_start_l1_batch_number",
+            &migration_start_l1_batch_number,
+        )
+        .report_latency()
+        .fetch_optional(self.storage.conn())
+        .await?;
+
+        let result = record.map(|row| row.number as u32);
+
+        Ok(result)
+    }
+
+    pub async fn get_miniblock_for_virtual_block_to(
+        &mut self,
+        migration_start_l1_batch_number: u64,
+        to_virtual_block_number: u64,
+    ) -> Result<Option<u32>, SqlxError> {
+        // Since virtual blocks are numbered starting from `migration_start_l1_batch_number` and not from 0,
+        // we have to subtract (migration_start_l1_batch_number - 1) from the `to` virtual block
+        // to find the miniblock using the query below.
+        let virtual_block_offset = to_virtual_block_number - migration_start_l1_batch_number + 1;
+
+        // In the query below, `virtual_block_sum` is effectively the latest virtual block number created
+        // within a miniblock, computed as the running sum of the virtual block counts of all miniblocks
+        // up to and including it.
+        // All logs are treated as if they were created in the last virtual block of the miniblock,
+        // which is why we are interested in finding it.
+        // The goal of this query is to find the last miniblock that contains all logs (in its last
+        // virtual block) created before or in the given virtual block.
+        let record = sqlx::query!(
+            "SELECT number \
+            FROM ( \
+                SELECT number, sum(virtual_blocks) OVER(ORDER BY number) AS virtual_block_sum \
+                FROM miniblocks \
+                WHERE l1_batch_number >= $1 \
+            ) AS vts \
+            WHERE virtual_block_sum <= $2 \
+            ORDER BY number DESC LIMIT 1",
+            migration_start_l1_batch_number as i64,
+            virtual_block_offset as i64
+        )
+        .instrument("get_miniblock_for_virtual_block_to")
+        .with_arg(
+            "migration_start_l1_batch_number",
+            &migration_start_l1_batch_number,
+        )
+        .report_latency()
+        .fetch_optional(self.storage.conn())
+        .await?;
+
+        let result = record.map(|row| row.number as u32);
+
+        Ok(result)
+    }
 }

 #[cfg(test)]
 mod tests {
     use db_test_macro::db_test;
-    use zksync_types::{block::MiniblockHeader, MiniblockNumber, ProtocolVersion};
-    use zksync_utils::miniblock_hash;
+    use zksync_contracts::BaseSystemContractsHashes;
+    use zksync_types::{
+        block::{miniblock_hash, MiniblockHeader},
+        MiniblockNumber, ProtocolVersion, ProtocolVersionId,
+    };

     use super::*;
     use crate::{tests::create_miniblock_header, ConnectionPool};
@@ -484,7 +599,7 @@
             .delete_miniblocks(MiniblockNumber(0))
             .await;
         conn.protocol_versions_dal()
-            .save_protocol_version(ProtocolVersion::default())
+            .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
         let header = MiniblockHeader {
             l1_tx_count: 3,
@@ -497,7 +612,12 @@
             api::BlockId::Number(api::BlockNumber::Earliest),
             api::BlockId::Number(api::BlockNumber::Latest),
             api::BlockId::Number(api::BlockNumber::Number(0.into())),
-            api::BlockId::Hash(miniblock_hash(MiniblockNumber(0))),
+            api::BlockId::Hash(miniblock_hash(
+                MiniblockNumber(0),
+                0,
+                H256::zero(),
+                H256::zero(),
+            )),
         ];
         for block_id in block_ids {
             let block = conn
@@ -507,16 +627,24 @@
             let block = block.unwrap().unwrap();
             assert!(block.transactions.is_empty());
             assert_eq!(block.number, U64::zero());
-            assert_eq!(block.hash,
miniblock_hash(MiniblockNumber(0))); + assert_eq!( + block.hash, + miniblock_hash(MiniblockNumber(0), 0, H256::zero(), H256::zero()) + ); let tx_count = conn.blocks_web3_dal().get_block_tx_count(block_id).await; - assert_eq!(tx_count.unwrap(), Some(8.into())); + assert_eq!(tx_count.unwrap(), Some((MiniblockNumber(0), 8.into()))); } let non_existing_block_ids = [ api::BlockId::Number(api::BlockNumber::Pending), api::BlockId::Number(api::BlockNumber::Number(1.into())), - api::BlockId::Hash(miniblock_hash(MiniblockNumber(1))), + api::BlockId::Hash(miniblock_hash( + MiniblockNumber(1), + 1, + H256::zero(), + H256::zero(), + )), ]; for block_id in non_existing_block_ids { let block = conn @@ -551,7 +679,7 @@ mod tests { .delete_miniblocks(MiniblockNumber(0)) .await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(0)) @@ -603,24 +731,109 @@ mod tests { .delete_miniblocks(MiniblockNumber(0)) .await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; conn.blocks_dal() .insert_miniblock(&create_miniblock_header(0)) .await; - let hash = miniblock_hash(MiniblockNumber(0)); + let hash = miniblock_hash(MiniblockNumber(0), 0, H256::zero(), H256::zero()); let miniblock_number = conn .blocks_web3_dal() .resolve_block_id(api::BlockId::Hash(hash)) .await; assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(0))); - let hash = miniblock_hash(MiniblockNumber(1)); + let hash = miniblock_hash(MiniblockNumber(1), 1, H256::zero(), H256::zero()); let miniblock_number = conn .blocks_web3_dal() .resolve_block_id(api::BlockId::Hash(hash)) .await; assert_eq!(miniblock_number.unwrap(), None); } + + #[db_test(dal_crate)] + async fn getting_miniblocks_for_virtual_block(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + + let mut header = MiniblockHeader { + number: MiniblockNumber(0), + timestamp: 0, + hash: miniblock_hash(MiniblockNumber(0), 0, H256::zero(), H256::zero()), + l1_tx_count: 0, + l2_tx_count: 0, + base_fee_per_gas: 100, + l1_gas_price: 100, + l2_fair_gas_price: 100, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: Some(ProtocolVersionId::default()), + virtual_blocks: 0, + }; + conn.blocks_dal().insert_miniblock(&header).await; + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(0)) + .await; + + header.number = MiniblockNumber(1); + conn.blocks_dal().insert_miniblock(&header).await; + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(1)) + .await; + + for i in 2..=100 { + header.number = MiniblockNumber(i); + header.virtual_blocks = 5; + + conn.blocks_dal().insert_miniblock(&header).await; + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(i)) + .await; + } + + let virtual_block_ranges = [ + (2, 4), + (20, 24), + (11, 15), + (1, 10), + (88, 99), + (1, 100), + (1000000, 10000000), + ]; + let expected_miniblock_ranges = [ + (Some(2), Some(1)), + (Some(5), Some(5)), + (Some(4), Some(4)), + (Some(2), Some(3)), + (Some(19), Some(20)), + (Some(2), Some(21)), + (None, Some(100)), + ]; + + let inputs_with_expected_values = + 
IntoIterator::into_iter(virtual_block_ranges).zip(expected_miniblock_ranges); + for ( + (virtual_block_start, virtual_block_end), + (expected_miniblock_from, expected_miniblock_to), + ) in inputs_with_expected_values + { + // migration_start_l1_batch_number = 1 + let miniblock_from = conn + .blocks_web3_dal() + .get_miniblock_for_virtual_block_from(1, virtual_block_start) + .await + .unwrap(); + assert_eq!(miniblock_from, expected_miniblock_from); + + let miniblock_to = conn + .blocks_web3_dal() + .get_miniblock_for_virtual_block_to(1, virtual_block_end) + .await + .unwrap(); + assert_eq!(miniblock_to, expected_miniblock_to); + } + } } diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs index b30d80180e03..64229c4d3dee 100644 --- a/core/lib/dal/src/connection/mod.rs +++ b/core/lib/dal/src/connection/mod.rs @@ -50,13 +50,13 @@ impl ConnectionPoolBuilder { } /// Builds a connection pool from this builder. - pub async fn build(&self) -> ConnectionPool { + pub async fn build(&self) -> anyhow::Result { let database_url = match self.db { - DbVariant::Master => get_master_database_url(), - DbVariant::Replica => get_replica_database_url(), - DbVariant::Prover => get_prover_database_url(), + DbVariant::Master => get_master_database_url()?, + DbVariant::Replica => get_replica_database_url()?, + DbVariant::Prover => get_prover_database_url()?, }; - self.build_inner(&database_url).await + Ok(self.build_inner(&database_url).await) } pub async fn build_inner(&self, database_url: &str) -> ConnectionPool { @@ -78,7 +78,7 @@ impl ConnectionPoolBuilder { .unwrap_or_else(|err| { panic!("Failed connecting to {:?} database: {}", self.db, err); }); - vlog::info!( + tracing::info!( "Created pool for {db:?} database with {max_connections} max connections \ and {statement_timeout:?} statement timeout", db = self.db, @@ -169,7 +169,7 @@ impl ConnectionPool { }; Self::report_connection_error(&connection_err); - vlog::warn!( + tracing::warn!( "Failed to get connection to DB, backing off for {BACKOFF_INTERVAL:?}: {connection_err}" ); tokio::time::sleep(BACKOFF_INTERVAL).await; @@ -216,7 +216,7 @@ mod tests { async fn setting_statement_timeout() { // We cannot use an ordinary test pool here because it isn't created using `ConnectionPoolBuilder`. // Since we don't need to mutate the DB for the test, using a real DB connection is OK. 
-        let database_url = get_test_database_url();
+        let database_url = get_test_database_url().unwrap();
         let pool = ConnectionPool::builder(DbVariant::Master)
             .set_statement_timeout(Some(Duration::from_secs(1)))
             .build_inner(&database_url)
diff --git a/core/lib/dal/src/connection/test_pool.rs b/core/lib/dal/src/connection/test_pool.rs
index 7a00eea42201..722f389584ac 100644
--- a/core/lib/dal/src/connection/test_pool.rs
+++ b/core/lib/dal/src/connection/test_pool.rs
@@ -36,7 +36,7 @@ impl fmt::Debug for TestPoolInner {

 impl TestPoolInner {
     async fn new() -> Self {
-        let database_url = crate::get_test_database_url();
+        let database_url = crate::get_test_database_url().unwrap();
         let connection = PgConnection::connect(&database_url).await.unwrap();
         let mut connection = Box::pin(connection);

diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs
index c635d169218b..272da1824c36 100644
--- a/core/lib/dal/src/contract_verification_dal.rs
+++ b/core/lib/dal/src/contract_verification_dal.rs
@@ -358,7 +358,8 @@ impl ContractVerificationDal<'_, '_> {
             INSERT INTO compiler_versions (version, compiler, created_at, updated_at)
             SELECT u.version, $2, now(), now()
                 FROM UNNEST($1::text[])
-                AS u(version)",
+                AS u(version)
+                ON CONFLICT (version, compiler) DO NOTHING",
             &versions,
             &compiler,
         )
diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs
index 9550a68dd62b..cac0aa9b0d63 100644
--- a/core/lib/dal/src/events_dal.rs
+++ b/core/lib/dal/src/events_dal.rs
@@ -217,7 +217,7 @@ mod tests {
             .delete_miniblocks(MiniblockNumber(0))
             .await;
         conn.protocol_versions_dal()
-            .save_protocol_version(ProtocolVersion::default())
+            .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
         conn.blocks_dal()
             .insert_miniblock(&create_miniblock_header(1))
             .await
@@ -292,7 +292,7 @@ mod tests {
             .delete_miniblocks(MiniblockNumber(0))
             .await;
         conn.protocol_versions_dal()
-            .save_protocol_version(ProtocolVersion::default())
+            .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
         conn.blocks_dal()
             .insert_miniblock(&create_miniblock_header(1))
diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs
index 2142a66a4ba5..1b38362d6a90 100644
--- a/core/lib/dal/src/events_web3_dal.rs
+++ b/core/lib/dal/src/events_web3_dal.rs
@@ -21,11 +21,11 @@ impl EventsWeb3Dal<'_, '_> {
     /// Used to determine if there are more than `offset` logs that satisfy the filter.
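     /// (For example, with `offset = 1000`, a `Some(_)` result means the filter matches more than
     /// 1000 logs, so a caller can reject an over-broad request early instead of fetching them all.)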
pub async fn get_log_block_number( &mut self, - filter: GetLogsFilter, + filter: &GetLogsFilter, offset: usize, ) -> Result, SqlxError> { { - let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter); + let (where_sql, arg_index) = self.build_get_logs_where_clause(filter); let query = format!( r#" @@ -52,7 +52,7 @@ impl EventsWeb3Dal<'_, '_> { let log = query .instrument("get_log_block_number") .report_latency() - .with_arg("filter", &filter) + .with_arg("filter", filter) .with_arg("offset", &offset) .fetch_optional(self.storage.conn()) .await?; diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/core/lib/dal/src/fri_proof_compressor_dal.rs new file mode 100644 index 000000000000..0ca1f435aff2 --- /dev/null +++ b/core/lib/dal/src/fri_proof_compressor_dal.rs @@ -0,0 +1,218 @@ +use sqlx::Row; +use std::collections::HashMap; +use std::str::FromStr; +use std::time::Duration; +use strum::{Display, EnumString}; + +use zksync_types::proofs::{JobCountStatistics, StuckJobs}; +use zksync_types::L1BatchNumber; + +use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct FriProofCompressorDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +#[derive(Debug, EnumString, Display)] +pub enum ProofCompressionJobStatus { + #[strum(serialize = "queued")] + Queued, + #[strum(serialize = "in_progress")] + InProgress, + #[strum(serialize = "successful")] + Successful, + #[strum(serialize = "failed")] + Failed, + #[strum(serialize = "sent_to_server")] + SentToServer, + #[strum(serialize = "skipped")] + Skipped, +} + +impl FriProofCompressorDal<'_, '_> { + pub async fn insert_proof_compression_job( + &mut self, + block_number: L1BatchNumber, + fri_proof_blob_url: &str, + ) { + sqlx::query!( + "INSERT INTO proof_compression_jobs_fri(l1_batch_number, fri_proof_blob_url, status, created_at, updated_at) \ + VALUES ($1, $2, $3, now(), now()) \ + ON CONFLICT (l1_batch_number) DO NOTHING", + block_number.0 as i64, + fri_proof_blob_url, + ProofCompressionJobStatus::Queued.to_string(), + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn skip_proof_compression_job(&mut self, block_number: L1BatchNumber) { + sqlx::query!( + "INSERT INTO proof_compression_jobs_fri(l1_batch_number, status, created_at, updated_at) \ + VALUES ($1, $2, now(), now()) \ + ON CONFLICT (l1_batch_number) DO NOTHING", + block_number.0 as i64, + ProofCompressionJobStatus::Skipped.to_string(), + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_next_proof_compression_job( + &mut self, + picked_by: &str, + ) -> Option { + let result: Option = sqlx::query!( + "UPDATE proof_compression_jobs_fri \ + SET status = $1, attempts = attempts + 1, \ + updated_at = now(), processing_started_at = now(), \ + picked_by = $3 \ + WHERE l1_batch_number = ( \ + SELECT l1_batch_number \ + FROM proof_compression_jobs_fri \ + WHERE status = $2 \ + ORDER BY l1_batch_number ASC \ + LIMIT 1 \ + FOR UPDATE \ + SKIP LOCKED \ + ) \ + RETURNING proof_compression_jobs_fri.l1_batch_number", + ProofCompressionJobStatus::InProgress.to_string(), + ProofCompressionJobStatus::Queued.to_string(), + picked_by, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + result + } + + pub async fn mark_proof_compression_job_successful( + &mut self, + block_number: L1BatchNumber, + time_taken: Duration, + l1_proof_blob_url: &str, + ) { + 
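+        // Record how long compression took (`time_taken` is converted with `duration_to_naive_time`,
+        // i.e. stored as a wall-clock time-of-day value) and where the resulting L1 proof blob was
+        // uploaded, then mark the job as `successful`.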
sqlx::query!( + "UPDATE proof_compression_jobs_fri \ + SET status = $1, updated_at = now(), time_taken = $2, l1_proof_blob_url = $3\ + WHERE l1_batch_number = $4", + ProofCompressionJobStatus::Successful.to_string(), + duration_to_naive_time(time_taken), + l1_proof_blob_url, + block_number.0 as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_proof_compression_job_failed( + &mut self, + error: &str, + block_number: L1BatchNumber, + ) { + sqlx::query!( + "UPDATE proof_compression_jobs_fri \ + SET status =$1, error= $2, updated_at = now() \ + WHERE l1_batch_number = $3", + ProofCompressionJobStatus::Failed.to_string(), + error, + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_least_proven_block_number_not_sent_to_server( + &mut self, + ) -> Option<(L1BatchNumber, ProofCompressionJobStatus)> { + let row = sqlx::query!( + "SELECT l1_batch_number, status \ + FROM proof_compression_jobs_fri + WHERE l1_batch_number = ( \ + SELECT MIN(l1_batch_number) \ + FROM proof_compression_jobs_fri \ + WHERE status = $1 OR status = $2 + )", + ProofCompressionJobStatus::Successful.to_string(), + ProofCompressionJobStatus::Skipped.to_string() + ) + .fetch_optional(self.storage.conn()) + .await + .ok()?; + match row { + Some(row) => Some(( + L1BatchNumber(row.l1_batch_number as u32), + ProofCompressionJobStatus::from_str(&row.status).unwrap(), + )), + None => None, + } + } + + pub async fn mark_proof_sent_to_server(&mut self, block_number: L1BatchNumber) { + sqlx::query!( + "UPDATE proof_compression_jobs_fri \ + SET status = $1, updated_at = now() \ + WHERE l1_batch_number = $2", + ProofCompressionJobStatus::SentToServer.to_string(), + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_jobs_stats(&mut self) -> JobCountStatistics { + let mut results: HashMap = sqlx::query( + "SELECT COUNT(*) as \"count\", status as \"status\" \ + FROM proof_compression_jobs_fri \ + GROUP BY status", + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.get("status"), row.get::("count"))) + .collect::>(); + + JobCountStatistics { + queued: results.remove("queued").unwrap_or(0i64) as usize, + in_progress: results.remove("in_progress").unwrap_or(0i64) as usize, + failed: results.remove("failed").unwrap_or(0i64) as usize, + successful: results.remove("successful").unwrap_or(0i64) as usize, + } + } + + pub async fn requeue_stuck_jobs( + &mut self, + processing_timeout: Duration, + max_attempts: u32, + ) -> Vec { + let processing_timeout = pg_interval_from_duration(processing_timeout); + { + sqlx::query!( + "UPDATE proof_compression_jobs_fri \ + SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() \ + WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) \ + OR (status = 'failed' AND attempts < $2) \ + RETURNING l1_batch_number, status, attempts", + &processing_timeout, + max_attempts as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.l1_batch_number as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() + } + } +} diff --git a/core/lib/dal/src/fri_protocol_versions_dal.rs b/core/lib/dal/src/fri_protocol_versions_dal.rs new file mode 100644 index 000000000000..8fbcf922d8bb --- /dev/null +++ b/core/lib/dal/src/fri_protocol_versions_dal.rs @@ -0,0 +1,80 @@ +use 
std::convert::TryFrom; + +use zksync_types::protocol_version::FriProtocolVersionId; +use zksync_types::protocol_version::L1VerifierConfig; + +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct FriProtocolVersionsDal<'a, 'c> { + pub storage: &'a mut StorageProcessor<'c>, +} + +impl FriProtocolVersionsDal<'_, '_> { + pub async fn save_prover_protocol_version( + &mut self, + id: FriProtocolVersionId, + l1_verifier_config: L1VerifierConfig, + ) { + sqlx::query!( + "INSERT INTO prover_fri_protocol_versions \ + (id, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, \ + recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, created_at) \ + VALUES ($1, $2, $3, $4, $5, now()) \ + ON CONFLICT(id) DO NOTHING", + id as i32, + l1_verifier_config + .recursion_scheduler_level_vk_hash + .as_bytes(), + l1_verifier_config + .params + .recursion_node_level_vk_hash + .as_bytes(), + l1_verifier_config + .params + .recursion_leaf_level_vk_hash + .as_bytes(), + l1_verifier_config + .params + .recursion_circuits_set_vks_hash + .as_bytes(), + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn protocol_version_for( + &mut self, + vk_commitments: &L1VerifierConfig, + ) -> Vec { + sqlx::query!( + "SELECT id \ + FROM prover_fri_protocol_versions \ + WHERE recursion_circuits_set_vks_hash = $1 \ + AND recursion_leaf_level_vk_hash = $2 \ + AND recursion_node_level_vk_hash = $3 \ + AND recursion_scheduler_level_vk_hash = $4 \ + ", + vk_commitments + .params + .recursion_circuits_set_vks_hash + .as_bytes(), + vk_commitments + .params + .recursion_leaf_level_vk_hash + .as_bytes(), + vk_commitments + .params + .recursion_node_level_vk_hash + .as_bytes(), + vk_commitments.recursion_scheduler_level_vk_hash.as_bytes(), + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| FriProtocolVersionId::try_from(row.id as u16).unwrap()) + .collect() + } +} diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index 761a128407fc..a6cfd2680d3b 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -1,6 +1,7 @@ use std::{collections::HashMap, convert::TryFrom, time::Duration}; use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; +use zksync_types::protocol_version::FriProtocolVersionId; use zksync_types::{ proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs}, L1BatchNumber, @@ -25,6 +26,7 @@ impl FriProverDal<'_, '_> { circuit_ids_and_urls: Vec<(u8, String)>, aggregation_round: AggregationRound, depth: u16, + protocol_version_id: FriProtocolVersionId, ) { let latency = MethodLatency::new("save_fri_prover_jobs"); for (sequence_number, (circuit_id, circuit_blob_url)) in @@ -38,22 +40,30 @@ impl FriProverDal<'_, '_> { aggregation_round, circuit_blob_url, false, + protocol_version_id, ) .await; } drop(latency); } - pub async fn get_next_job(&mut self) -> Option { + pub async fn get_next_job( + &mut self, + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, + ) -> Option { + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); sqlx::query!( " UPDATE prover_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $2 WHERE id = ( SELECT id FROM prover_jobs_fri WHERE status = 'queued' + AND protocol_version = ANY($1) ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC LIMIT 1 
FOR UPDATE @@ -63,6 +73,8 @@ impl FriProverDal<'_, '_> { prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth, prover_jobs_fri.is_node_final_proof ", + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -81,11 +93,14 @@ impl FriProverDal<'_, '_> { pub async fn get_next_job_for_circuit_id_round( &mut self, circuits_to_pick: &[CircuitIdRoundTuple], + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, ) -> Option { let circuit_ids: Vec<_> = circuits_to_pick .iter() .map(|tuple| tuple.circuit_id as i16) .collect(); + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); let aggregation_rounds: Vec<_> = circuits_to_pick .iter() .map(|tuple| tuple.aggregation_round as i16) @@ -94,7 +109,8 @@ impl FriProverDal<'_, '_> { " UPDATE prover_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $4 WHERE id = ( SELECT id FROM prover_jobs_fri @@ -102,6 +118,7 @@ impl FriProverDal<'_, '_> { AND (circuit_id, aggregation_round) IN ( SELECT * FROM UNNEST($1::smallint[], $2::smallint[]) ) + AND protocol_version = ANY($3) ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC LIMIT 1 FOR UPDATE @@ -113,6 +130,8 @@ impl FriProverDal<'_, '_> { ", &circuit_ids[..], &aggregation_rounds[..], + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -219,11 +238,12 @@ impl FriProverDal<'_, '_> { aggregation_round: AggregationRound, circuit_blob_url: &str, is_node_final_proof: bool, + protocol_version_id: FriProtocolVersionId, ) { sqlx::query!( " - INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now()) + INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', now(), now()) ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) DO UPDATE SET updated_at=now() ", @@ -234,6 +254,7 @@ impl FriProverDal<'_, '_> { sequence_number as i64, depth as i32, is_node_final_proof, + protocol_version_id as i32, ) .execute(self.storage.conn()) .await @@ -309,4 +330,34 @@ impl FriProverDal<'_, '_> { .await .unwrap(); } + + pub async fn save_successful_sent_proof(&mut self, l1_batch_number: L1BatchNumber) { + sqlx::query!( + "UPDATE prover_jobs_fri \ + SET status = 'sent_to_server', updated_at = now() \ + WHERE l1_batch_number = $1", + l1_batch_number.0 as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_scheduler_proof_job_id( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Option { + sqlx::query!( + "SELECT id from prover_jobs_fri \ + WHERE l1_batch_number = $1 \ + AND status = 'successful' \ + AND aggregation_round = $2", + l1_batch_number.0 as i64, + AggregationRound::Scheduler as i16, + ) + .fetch_optional(self.storage.conn()) + .await + .ok()? 
+ .map(|row| row.id as u32) + } } diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs index 801d66f8d4ed..2f5433f0ddb1 100644 --- a/core/lib/dal/src/fri_witness_generator_dal.rs +++ b/core/lib/dal/src/fri_witness_generator_dal.rs @@ -1,7 +1,9 @@ use sqlx::Row; +use std::convert::TryFrom; use std::{collections::HashMap, time::Duration}; +use zksync_types::protocol_version::FriProtocolVersionId; use zksync_types::{ proofs::{ AggregationRound, JobCountStatistics, LeafAggregationJobMetadata, @@ -36,35 +38,44 @@ pub enum FriWitnessJobStatus { } impl FriWitnessGeneratorDal<'_, '_> { - pub async fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { - { - sqlx::query!( - "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) \ - VALUES ($1, $2, 'queued', now(), now()) + pub async fn save_witness_inputs( + &mut self, + block_number: L1BatchNumber, + object_key: &str, + protocol_version_id: FriProtocolVersionId, + ) { + sqlx::query!( + "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, protocol_version, status, created_at, updated_at) \ + VALUES ($1, $2, $3, 'queued', now(), now()) \ ON CONFLICT (l1_batch_number) DO NOTHING", block_number.0 as i64, object_key, + protocol_version_id as i32, ) .fetch_optional(self.storage.conn()) .await .unwrap(); - } } pub async fn get_next_basic_circuit_witness_job( &mut self, last_l1_batch_to_process: u32, + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, ) -> Option { + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); let result: Option = sqlx::query!( " UPDATE witness_inputs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $3 WHERE l1_batch_number = ( SELECT l1_batch_number FROM witness_inputs_fri WHERE l1_batch_number <= $1 AND status = 'queued' + AND protocol_version = ANY($2) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -72,7 +83,9 @@ impl FriWitnessGeneratorDal<'_, '_> { ) RETURNING witness_inputs_fri.* ", - last_l1_batch_to_process as i64 + last_l1_batch_to_process as i64, + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -194,6 +207,7 @@ impl FriWitnessGeneratorDal<'_, '_> { closed_form_inputs_and_urls: &Vec<(u8, String, usize)>, scheduler_partial_input_blob_url: &str, base_layer_to_recursive_layer_circuit_id: fn(u8) -> u8, + protocol_version_id: FriProtocolVersionId, ) { { let latency = MethodLatency::new("create_aggregation_jobs_fri"); @@ -203,8 +217,8 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( " INSERT INTO leaf_aggregation_witness_jobs_fri - (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at) - VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now()) + (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now()) ON CONFLICT(l1_batch_number, circuit_id) DO UPDATE SET updated_at=now() ", @@ -212,6 +226,7 @@ impl FriWitnessGeneratorDal<'_, '_> { *circuit_id as i16, closed_form_inputs_url, *number_of_basic_circuits as i32, + protocol_version_id as i32, ) .execute(self.storage.conn()) .await @@ -223,6 +238,7 @@ impl FriWitnessGeneratorDal<'_, '_> { None, 0, "", + 
protocol_version_id, ) .await; } @@ -230,13 +246,14 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( " INSERT INTO scheduler_witness_jobs_fri - (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at) - VALUES ($1, $2, 'waiting_for_proofs', now(), now()) + (l1_batch_number, scheduler_partial_input_blob_url, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, 'waiting_for_proofs', now(), now()) ON CONFLICT(l1_batch_number) DO UPDATE SET updated_at=now() ", block_number.0 as i64, scheduler_partial_input_blob_url, + protocol_version_id as i32, ) .execute(self.storage.conn()) .await @@ -260,16 +277,23 @@ impl FriWitnessGeneratorDal<'_, '_> { } } - pub async fn get_next_leaf_aggregation_job(&mut self) -> Option { + pub async fn get_next_leaf_aggregation_job( + &mut self, + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, + ) -> Option { + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); let row = sqlx::query!( " UPDATE leaf_aggregation_witness_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $2 WHERE id = ( SELECT id FROM leaf_aggregation_witness_jobs_fri WHERE status = 'queued' + AND protocol_version = ANY($1) ORDER BY l1_batch_number ASC, id ASC LIMIT 1 FOR UPDATE @@ -277,6 +301,8 @@ impl FriWitnessGeneratorDal<'_, '_> { ) RETURNING leaf_aggregation_witness_jobs_fri.* ", + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -383,16 +409,23 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn get_next_node_aggregation_job(&mut self) -> Option { + pub async fn get_next_node_aggregation_job( + &mut self, + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, + ) -> Option { + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); let row = sqlx::query!( " UPDATE node_aggregation_witness_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $2 WHERE id = ( SELECT id FROM node_aggregation_witness_jobs_fri WHERE status = 'queued' + AND protocol_version = ANY($1) ORDER BY l1_batch_number ASC, depth ASC, id ASC LIMIT 1 FOR UPDATE @@ -400,6 +433,8 @@ impl FriWitnessGeneratorDal<'_, '_> { ) RETURNING node_aggregation_witness_jobs_fri.* ", + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -463,17 +498,19 @@ impl FriWitnessGeneratorDal<'_, '_> { number_of_dependent_jobs: Option, depth: u16, aggregations_url: &str, + protocol_version_id: FriProtocolVersionId, ) { sqlx::query!( - "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now()) + "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, protocol_version, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', now(), now()) ON CONFLICT(l1_batch_number, circuit_id, depth) DO UPDATE SET updated_at=now()", block_number.0 as i64, circuit_id as i16, depth as i32, aggregations_url, - number_of_dependent_jobs + number_of_dependent_jobs, + protocol_version_id as i32, ) .fetch_optional(self.storage.conn()) 
.await @@ -628,16 +665,23 @@ impl FriWitnessGeneratorDal<'_, '_> { .collect() } - pub async fn get_next_scheduler_witness_job(&mut self) -> Option { + pub async fn get_next_scheduler_witness_job( + &mut self, + protocol_versions: &[FriProtocolVersionId], + picked_by: &str, + ) -> Option { + let protocol_versions: Vec = protocol_versions.iter().map(|&id| id as i32).collect(); let result: Option = sqlx::query!( " UPDATE scheduler_witness_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now() + updated_at = now(), processing_started_at = now(), + picked_by = $2 WHERE l1_batch_number = ( SELECT l1_batch_number FROM scheduler_witness_jobs_fri WHERE status = 'queued' + AND protocol_version = ANY($1) ORDER BY l1_batch_number ASC LIMIT 1 FOR UPDATE @@ -645,6 +689,8 @@ impl FriWitnessGeneratorDal<'_, '_> { ) RETURNING scheduler_witness_jobs_fri.* ", + &protocol_versions[..], + picked_by, ) .fetch_optional(self.storage.conn()) .await @@ -724,4 +770,22 @@ impl FriWitnessGeneratorDal<'_, '_> { AggregationRound::Scheduler => "scheduler_witness_jobs_fri", } } + + pub async fn protocol_version_for_l1_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> FriProtocolVersionId { + sqlx::query!( + "SELECT protocol_version \ + FROM witness_inputs_fri \ + WHERE l1_batch_number = $1", + l1_batch_number.0 as i64, + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .protocol_version + .map(|id| FriProtocolVersionId::try_from(id as u16).unwrap()) + .unwrap() + } } diff --git a/core/lib/dal/src/instrument.rs b/core/lib/dal/src/instrument.rs index f5b33f2977a1..8599ed0911f2 100644 --- a/core/lib/dal/src/instrument.rs +++ b/core/lib/dal/src/instrument.rs @@ -148,7 +148,7 @@ impl<'a> InstrumentedData<'a> { let output = match output { Ok(output) => output, Err(_) => { - vlog::warn!( + tracing::warn!( "Query {name}{args} called at {file}:{line} is executing for more than {SLOW_QUERY_TIMEOUT:?}", file = location.file(), line = location.line() @@ -165,14 +165,14 @@ impl<'a> InstrumentedData<'a> { } if let Err(err) = &output { - vlog::warn!( + tracing::warn!( "Query {name}{args} called at {file}:{line} has resulted in error: {err}", file = location.file(), line = location.line() ); metrics::increment_counter!("dal.request.error", "method" => name); } else if is_slow { - vlog::info!( + tracing::info!( "Slow query {name}{args} called at {file}:{line} has finished after {elapsed:?}", file = location.file(), line = location.line() diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 556850ee4a44..59ab4a1daa1a 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -6,6 +6,7 @@ use std::env; pub use sqlx::Error as SqlxError; use sqlx::{postgres::Postgres, Connection, PgConnection, Transaction}; // External imports +use anyhow::Context as _; use sqlx::pool::PoolConnection; pub use sqlx::types::BigDecimal; @@ -20,6 +21,8 @@ use crate::eth_sender_dal::EthSenderDal; use crate::events_dal::EventsDal; use crate::events_web3_dal::EventsWeb3Dal; use crate::fri_gpu_prover_queue_dal::FriGpuProverQueueDal; +use crate::fri_proof_compressor_dal::FriProofCompressorDal; +use crate::fri_protocol_versions_dal::FriProtocolVersionsDal; use crate::fri_prover_dal::FriProverDal; use crate::fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal; use crate::fri_witness_generator_dal::FriWitnessGeneratorDal; @@ -50,6 +53,8 @@ pub mod eth_sender_dal; pub mod events_dal; pub mod events_web3_dal; pub mod fri_gpu_prover_queue_dal; +pub 
mod fri_proof_compressor_dal;
+pub mod fri_protocol_versions_dal;
 pub mod fri_prover_dal;
 pub mod fri_scheduler_dependency_tracker_dal;
 pub mod fri_witness_generator_dal;
@@ -77,23 +82,29 @@ pub mod witness_generator_dal;
 mod tests;

 /// Obtains the master database URL from the environment variable.
-pub fn get_master_database_url() -> String {
-    env::var("DATABASE_URL").expect("DATABASE_URL must be set")
+pub fn get_master_database_url() -> anyhow::Result<String> {
+    env::var("DATABASE_URL").context("DATABASE_URL must be set")
 }

 /// Obtains the master prover database URL from the environment variable.
-pub fn get_prover_database_url() -> String {
-    env::var("DATABASE_PROVER_URL").unwrap_or_else(|_| get_master_database_url())
+pub fn get_prover_database_url() -> anyhow::Result<String> {
+    match env::var("DATABASE_PROVER_URL") {
+        Ok(url) => Ok(url),
+        Err(_) => get_master_database_url(),
+    }
 }

 /// Obtains the replica database URL from the environment variable.
-pub fn get_replica_database_url() -> String {
-    env::var("DATABASE_REPLICA_URL").unwrap_or_else(|_| get_master_database_url())
+pub fn get_replica_database_url() -> anyhow::Result<String> {
+    match env::var("DATABASE_REPLICA_URL") {
+        Ok(url) => Ok(url),
+        Err(_) => get_master_database_url(),
+    }
 }

 /// Obtains the test database URL from the environment variable.
-pub fn get_test_database_url() -> String {
-    env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set")
+pub fn get_test_database_url() -> anyhow::Result<String> {
+    env::var("TEST_DATABASE_URL").context("TEST_DATABASE_URL must be set")
 }

 /// Storage processor is the main storage interaction point.
@@ -106,17 +117,21 @@ pub struct StorageProcessor<'a> {
 }

 impl<'a> StorageProcessor<'a> {
-    pub async fn establish_connection(connect_to_master: bool) -> StorageProcessor<'static> {
+    pub async fn establish_connection(
+        connect_to_master: bool,
+    ) -> anyhow::Result<StorageProcessor<'static>> {
         let database_url = if connect_to_master {
-            get_master_database_url()
+            get_master_database_url()?
         } else {
-            get_replica_database_url()
+            get_replica_database_url()?
         };
-        let connection = PgConnection::connect(&database_url).await.unwrap();
-        StorageProcessor {
+        let connection = PgConnection::connect(&database_url)
+            .await
+            .context("PgConnection::connect()")?;
+        Ok(StorageProcessor {
             conn: ConnectionHolder::Direct(connection),
             in_transaction: false,
-        }
+        })
     }

     pub async fn start_transaction<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> {
@@ -279,4 +294,12 @@ impl<'a> StorageProcessor<'a> {
     pub fn fri_gpu_prover_queue_dal(&mut self) -> FriGpuProverQueueDal<'_, 'a> {
         FriGpuProverQueueDal { storage: self }
     }
+
+    pub fn fri_protocol_versions_dal(&mut self) -> FriProtocolVersionsDal<'_, 'a> {
+        FriProtocolVersionsDal { storage: self }
+    }
+
+    pub fn fri_proof_compressor_dal(&mut self) -> FriProofCompressorDal<'_, 'a> {
+        FriProofCompressorDal { storage: self }
+    }
 }
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 2dcef11ce85a..0b48c11756c9 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -91,6 +91,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec<Vec<u8>>) -> Vec<L2ToL1Log> {
         .collect()
 }

+// TODO (SMA-1635): Make these fields non optional in database
 fn convert_base_system_contracts_hashes(
     bootloader_code_hash: Option<Vec<u8>>,
     default_aa_code_hash: Option<Vec<u8>>,
@@ -106,6 +107,7 @@
 }

 /// Projection of the `l1_batches` table corresponding to [`L1BatchHeader`] + [`L1BatchMetadata`].
+// TODO(PLA-369): use `#[sqlx(flatten)]` once upgraded to newer `sqlx` #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageL1Batch { pub number: i64, @@ -480,6 +482,12 @@ pub struct StorageMiniblockHeader { pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, pub protocol_version: Option<i32>, + + // The maximal number of virtual blocks that can be created with this miniblock. + // If this value is greater than zero, then at least 1 will be created, but no more than + // min(virtual_blocks, miniblock_number - virtual_block_number), i.e. making sure that virtual blocks + // never go beyond the miniblock they are based on. + pub virtual_blocks: i64, } impl From<StorageMiniblockHeader> for MiniblockHeader { @@ -498,6 +506,7 @@ impl From<StorageMiniblockHeader> for MiniblockHeader { row.default_aa_code_hash, ), protocol_version: row.protocol_version.map(|v| (v as u16).try_into().unwrap()), + virtual_blocks: row.virtual_blocks as u32, } } } diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs index e37d5c7d5376..ed5a732ff792 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -17,6 +17,7 @@ pub struct StorageEthTx { pub predicted_gas_cost: i64, pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, + // TODO (SMA-1614): remove the field pub sent_at_block: Option<i32>, } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 4cec6bdce815..127d6442c4ac 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,4 +1,4 @@ -use std::str::FromStr; +use std::{convert::TryInto, str::FromStr}; use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; @@ -27,6 +27,9 @@ pub struct StorageSyncBlock { pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, pub fee_account_address: Option<Vec<u8>>, // May be None if the block is not yet sealed + pub protocol_version: i32, + pub virtual_blocks: i64, + pub hash: Vec<u8>, } impl StorageSyncBlock { @@ -67,6 +70,7 @@ impl StorageSyncBlock { .map(|executed_at| DateTime::<Utc>::from_utc(executed_at, Utc)), l1_gas_price: self.l1_gas_price as u64, l2_fair_gas_price: self.l2_fair_gas_price as u64, + // TODO (SMA-1635): Make these fields non-optional in database base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: self .bootloader_code_hash @@ -82,6 +86,9 @@ impl StorageSyncBlock { .map(|fee_account_address| Address::from_slice(&fee_account_address)) .unwrap_or(current_operator_address), transactions, + virtual_blocks: Some(self.virtual_blocks as u32), + hash: Some(H256::from_slice(&self.hash)), + protocol_version: (self.protocol_version as u16).try_into().unwrap(), } } } diff --git a/core/lib/dal/src/models/storage_token.rs b/core/lib/dal/src/models/storage_token.rs index 92eeff39e187..8b9779577d44 100644 --- a/core/lib/dal/src/models/storage_token.rs +++ b/core/lib/dal/src/models/storage_token.rs @@ -38,7 +38,7 @@ impl From<StorageTokenPrice> for Option<TokenPrice> { }), (None, None) => None, _ => { - vlog::warn!( + tracing::warn!( "Found storage token with {:?} `usd_price` and {:?} `usd_price_updated_at`", price.usd_price, price.usd_price_updated_at diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 65db0e05918a..1cf20a7dc19f 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -2,25 +2,25 @@ use std::{convert::TryInto, str::FromStr}; use crate::BigDecimal; use bigdecimal::Zero; -use 
serde::{Deserialize, Serialize}; -use sqlx::{ - postgres::PgRow, - types::chrono::{DateTime, NaiveDateTime, Utc}, - Error, FromRow, Row, -}; +use serde::{Deserialize, Serialize}; +use sqlx::postgres::PgRow; +use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; +use sqlx::{Error, FromRow, Row}; + +use zksync_types::l2::TransactionType; +use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; +use zksync_types::transaction_request::PaymasterParams; +use zksync_types::vm_trace::Call; +use zksync_types::web3::types::U64; +use zksync_types::{api, Bytes, ExecuteTransactionCommon}; use zksync_types::{ - api::{self, TransactionDetails, TransactionStatus}, + api::{TransactionDetails, TransactionStatus}, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, - l2::TransactionType, - protocol_version::ProtocolUpgradeTxCommonData, - transaction_request::PaymasterParams, - vm_trace::Call, - web3::types::U64, - Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, - PackedEthSignature, PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, - EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, + Address, Execute, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, PackedEthSignature, + PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, + PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::bigdecimal_to_u256; @@ -303,13 +303,16 @@ impl From for Transaction { common_data: ExecuteTransactionCommon::L1(tx.into()), execute, received_timestamp_ms, + raw_bytes: None, }, Some(t) if t == PROTOCOL_UPGRADE_TX_TYPE as i32 => Transaction { common_data: ExecuteTransactionCommon::ProtocolUpgrade(tx.into()), execute, received_timestamp_ms, + raw_bytes: None, }, _ => Transaction { + raw_bytes: tx.input.clone().map(Bytes::from), common_data: ExecuteTransactionCommon::L2(tx.into()), execute, received_timestamp_ms, @@ -465,7 +468,7 @@ impl From for TransactionDetails { is_l1_originated: tx_details.is_priority, status, fee, - gas_per_pubdata: Some(gas_per_pubdata), + gas_per_pubdata, initiator_address, received_at, eth_commit_tx_hash, diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs index 376325f2e910..1f551fd47614 100644 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ b/core/lib/dal/src/models/storage_witness_job_info.rs @@ -1,7 +1,6 @@ -use sqlx::types::chrono::{DateTime, Utc}; +use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; use std::convert::TryFrom; use std::str::FromStr; -use vlog::__chrono::{NaiveDateTime, NaiveTime}; use zksync_types::proofs::{ AggregationRound, JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, WitnessJobStatusSuccessful, diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 66d9c532a5d0..d5fd3079dc18 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -4,12 +4,25 @@ use zksync_types::L1BatchNumber; use crate::time_utils::pg_interval_from_duration; use crate::{SqlxError, StorageProcessor}; +use strum::{Display, EnumString}; #[derive(Debug)] pub struct ProofGenerationDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, } +#[derive(Debug, EnumString, Display)] +enum ProofGenerationJobStatus { + #[strum(serialize = "ready_to_be_proven")] + ReadyToBeProven, + #[strum(serialize = "picked_by_prover")] + 
PickedByProver, + #[strum(serialize = "generated")] + Generated, + #[strum(serialize = "skipped")] + Skipped, +} + impl ProofGenerationDal<'_, '_> { pub async fn get_next_block_to_be_proven( &mut self, @@ -59,4 +72,41 @@ impl ProofGenerationDal<'_, '_> { .then_some(()) .ok_or(sqlx::Error::RowNotFound) } + + pub async fn insert_proof_generation_details( + &mut self, + block_number: L1BatchNumber, + proof_gen_data_blob_url: &str, + ) { + sqlx::query!( + "INSERT INTO proof_generation_details \ + (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) \ + VALUES ($1, 'ready_to_be_proven', $2, now(), now()) \ + ON CONFLICT (l1_batch_number) DO NOTHING", + block_number.0 as i64, + proof_gen_data_blob_url, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_proof_generation_job_as_skipped( + &mut self, + block_number: L1BatchNumber, + ) -> Result<(), SqlxError> { + sqlx::query!( + "UPDATE proof_generation_details \ + SET status=$1, updated_at = now() \ + WHERE l1_batch_number = $2", + ProofGenerationJobStatus::Skipped.to_string(), + block_number.0 as i64, + ) + .execute(self.storage.conn()) + .await? + .rows_affected() + .eq(&1) + .then_some(()) + .ok_or(sqlx::Error::RowNotFound) + } } diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 65994f82c3b2..7495a98f60d6 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -1,8 +1,8 @@ use std::convert::{TryFrom, TryInto}; -use zksync_contracts::BaseSystemContracts; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolUpgradeTx, ProtocolVersion, VerifierParams}, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::models::storage_protocol_version::{ @@ -16,11 +16,39 @@ pub struct ProtocolVersionsDal<'a, 'c> { } impl ProtocolVersionsDal<'_, '_> { - pub async fn save_protocol_version(&mut self, version: ProtocolVersion) { - let tx_hash = version - .tx - .as_ref() - .map(|tx| tx.common_data.hash().0.to_vec()); + pub async fn save_protocol_version( + &mut self, + id: ProtocolVersionId, + timestamp: u64, + l1_verifier_config: L1VerifierConfig, + base_system_contracts_hashes: BaseSystemContractsHashes, + verifier_address: Address, + tx_hash: Option, + ) { + sqlx::query!( + "INSERT INTO protocol_versions \ + (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, \ + recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash, \ + default_account_code_hash, verifier_address, upgrade_tx_hash, created_at) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now())", + id as i32, + timestamp as i64, + l1_verifier_config.recursion_scheduler_level_vk_hash.as_bytes(), + l1_verifier_config.params.recursion_node_level_vk_hash.as_bytes(), + l1_verifier_config.params.recursion_leaf_level_vk_hash.as_bytes(), + l1_verifier_config.params.recursion_circuits_set_vks_hash.as_bytes(), + base_system_contracts_hashes.bootloader.as_bytes(), + base_system_contracts_hashes.default_aa.as_bytes(), + verifier_address.as_bytes(), + tx_hash.map(|tx_hash| tx_hash.0.to_vec()), + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn save_protocol_version_with_tx(&mut self, version: ProtocolVersion) { + let tx_hash = version.tx.as_ref().map(|tx| tx.common_data.hash()); let mut db_transaction = self.storage.start_transaction().await; if let Some(tx) = 
version.tx { @@ -30,12 +58,27 @@ impl ProtocolVersionsDal<'_, '_> { .await; } + db_transaction + .protocol_versions_dal() + .save_protocol_version( + version.id, + version.timestamp, + version.l1_verifier_config, + version.base_system_contracts_hashes, + version.verifier_address, + tx_hash, + ) + .await; + + db_transaction.commit().await; + } + + pub async fn save_prover_protocol_version(&mut self, version: ProtocolVersion) { sqlx::query!( - "INSERT INTO protocol_versions + "INSERT INTO prover_protocol_versions (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, - recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, bootloader_code_hash, - default_account_code_hash, verifier_address, upgrade_tx_hash, created_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, now()) + recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, verifier_address, created_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, now()) ", version.id as i32, version.timestamp as i64, @@ -43,16 +86,11 @@ impl ProtocolVersionsDal<'_, '_> { version.l1_verifier_config.params.recursion_node_level_vk_hash.as_bytes(), version.l1_verifier_config.params.recursion_leaf_level_vk_hash.as_bytes(), version.l1_verifier_config.params.recursion_circuits_set_vks_hash.as_bytes(), - version.base_system_contracts_hashes.bootloader.as_bytes(), - version.base_system_contracts_hashes.default_aa.as_bytes(), version.verifier_address.as_bytes(), - tx_hash ) - .execute(db_transaction.conn()) - .await - .unwrap(); - - db_transaction.commit().await; + .execute(self.storage.conn()) + .await + .unwrap(); } pub async fn base_system_contracts_by_timestamp( @@ -125,23 +163,26 @@ impl ProtocolVersionsDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap()?; - let tx = match storage_protocol_version.upgrade_tx_hash.as_ref() { - Some(hash) => Some( - self.storage - .transactions_dal() - .get_tx_by_hash(H256::from_slice(hash.as_slice())) - .await - .unwrap_or_else(|| { - panic!( - "Missing upgrade tx for protocol version {}", - version_id as u16 - ); - }) - .try_into() - .unwrap(), - ), - None => None, - }; + let tx = self + .get_protocol_upgrade_tx((storage_protocol_version.id as u16).try_into().unwrap()) + .await; + + Some(protocol_version_from_storage(storage_protocol_version, tx)) + } + + pub async fn get_protocol_version( + &mut self, + version_id: ProtocolVersionId, + ) -> Option { + let storage_protocol_version: StorageProtocolVersion = sqlx::query_as!( + StorageProtocolVersion, + "SELECT * FROM protocol_versions WHERE id = $1", + version_id as i32 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + let tx = self.get_protocol_upgrade_tx(version_id).await; Some(protocol_version_from_storage(storage_protocol_version, tx)) } @@ -234,7 +275,7 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" SELECT id - FROM protocol_versions + FROM prover_protocol_versions WHERE recursion_circuits_set_vks_hash = $1 AND recursion_leaf_level_vk_hash = $2 AND recursion_node_level_vk_hash = $3 @@ -261,4 +302,17 @@ impl ProtocolVersionsDal<'_, '_> { .map(|row| ProtocolVersionId::try_from(row.id as u16).unwrap()) .collect() } + + pub async fn prover_protocol_version_exists(&mut self, id: ProtocolVersionId) -> bool { + sqlx::query!( + "SELECT COUNT(*) as \"count!\" FROM prover_protocol_versions \ + WHERE id = $1", + id as i32 + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .count + > 0 + } } diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs index 
29bef1db6ae9..fff52cd26feb 100644 --- a/core/lib/dal/src/prover_dal.rs +++ b/core/lib/dal/src/prover_dal.rs @@ -456,7 +456,7 @@ impl ProverDal<'_, '_> { .map(|ss| { { // Until statuses are enums - let whitelist = vec!["queued", "in_progress", "successful", "failed"]; + let whitelist = ["queued", "in_progress", "successful", "failed"]; if !ss.iter().all(|x| whitelist.contains(&x.as_str())) { panic!("Forbidden value in statuses list.") } diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 90e05106b794..ad9a6f401f47 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -1,4 +1,5 @@ use sqlx::types::chrono::Utc; +use sqlx::Row; use std::{collections::HashMap, time::Instant}; @@ -73,13 +74,15 @@ impl StorageLogsDal<'_, '_> { logs: &[(H256, Vec)], ) { let operation_number = sqlx::query!( - "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1", + "SELECT MAX(operation_number) as \"max?\" FROM storage_logs WHERE miniblock_number = $1", block_number.0 as i64 ) .fetch_one(self.storage.conn()) .await .unwrap() - .count as u32; + .max + .map(|max| max as u32 + 1) + .unwrap_or(0); self.insert_storage_logs_inner(block_number, logs, operation_number) .await; @@ -91,7 +94,7 @@ impl StorageLogsDal<'_, '_> { let modified_keys = self .modified_keys_since_miniblock(last_miniblock_to_keep) .await; - vlog::info!( + tracing::info!( "Loaded {} keys changed after miniblock #{last_miniblock_to_keep} in {:?}", modified_keys.len(), stage_start.elapsed() @@ -101,7 +104,7 @@ impl StorageLogsDal<'_, '_> { let prev_values = self .get_storage_values(&modified_keys, last_miniblock_to_keep) .await; - vlog::info!( + tracing::info!( "Loaded previous storage values for modified keys in {:?}", stage_start.elapsed() ); @@ -118,7 +121,7 @@ impl StorageLogsDal<'_, '_> { keys_to_delete.push(key.as_bytes()); } } - vlog::info!( + tracing::info!( "Created revert plan (keys to update: {}, to delete: {}) in {:?}", keys_to_update.len(), keys_to_delete.len(), @@ -133,7 +136,7 @@ impl StorageLogsDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - vlog::info!( + tracing::info!( "Removed {} keys in {:?}", keys_to_delete.len(), stage_start.elapsed() @@ -150,7 +153,7 @@ impl StorageLogsDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - vlog::info!( + tracing::info!( "Updated {} keys to previous values in {:?}", keys_to_update.len(), stage_start.elapsed() @@ -254,7 +257,7 @@ impl StorageLogsDal<'_, '_> { let stage_start = Instant::now(); let mut modified_keys = self.modified_keys_since_miniblock(last_miniblock).await; let modified_keys_count = modified_keys.len(); - vlog::info!( + tracing::info!( "Fetched {modified_keys_count} keys changed after miniblock #{last_miniblock} in {:?}", stage_start.elapsed() ); @@ -265,7 +268,7 @@ impl StorageLogsDal<'_, '_> { // the incorrect state after revert. 
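// `get_l1_batches_for_initial_writes` (called below) maps each modified key to the L1 batch of its first write: keys whose first write comes after the rollback point are deleted, keys written earlier are reverted to their previous values, and keys with no initial-write record never reached storage and are ignored as deduped.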
let stage_start = Instant::now(); let l1_batch_by_key = self.get_l1_batches_for_initial_writes(&modified_keys).await; - vlog::info!( + tracing::info!( "Loaded initial write info for modified keys in {:?}", stage_start.elapsed() ); @@ -286,13 +289,13 @@ impl StorageLogsDal<'_, '_> { Some(_) => true, } }); - vlog::info!( + tracing::info!( "Filtered modified keys per initial writes in {:?}", stage_start.elapsed() ); let deduped_count = modified_keys_count - l1_batch_by_key.len(); - vlog::info!( + tracing::info!( "Keys to update: {update_count}, to delete: {delete_count}; {deduped_count} modified keys \ are deduped and will be ignored", update_count = modified_keys.len(), @@ -303,7 +306,7 @@ impl StorageLogsDal<'_, '_> { let prev_values_for_updated_keys = self .get_storage_values(&modified_keys, last_miniblock) .await; - vlog::info!( + tracing::info!( "Loaded previous values for {} keys in {:?}", prev_values_for_updated_keys.len(), stage_start.elapsed() @@ -371,7 +374,7 @@ impl StorageLogsDal<'_, '_> { } /// Returns current values for the specified keys at the specified `miniblock_number`. - async fn get_storage_values( + pub async fn get_storage_values( &mut self, hashed_keys: &[H256], miniblock_number: MiniblockNumber, @@ -426,6 +429,90 @@ impl StorageLogsDal<'_, '_> { }) .collect() } + + pub async fn get_miniblock_storage_logs( + &mut self, + miniblock_number: MiniblockNumber, + ) -> Vec<(H256, H256, u32)> { + self.get_miniblock_storage_logs_from_table(miniblock_number, "storage_logs") + .await + } + + pub async fn retain_storage_logs( + &mut self, + miniblock_number: MiniblockNumber, + operation_numbers: &[i32], + ) { + sqlx::query!( + "DELETE FROM storage_logs \ + WHERE miniblock_number = $1 AND operation_number != ALL($2)", + miniblock_number.0 as i64, + &operation_numbers + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + /// Loads `(hashed_key, value, operation_number)` tuples for the given `miniblock_number`. + /// Uses the provided DB table. + /// Shouldn't be used in production. + pub async fn get_miniblock_storage_logs_from_table( + &mut self, + miniblock_number: MiniblockNumber, + table_name: &str, + ) -> Vec<(H256, H256, u32)> { + sqlx::query(&format!( + "SELECT hashed_key, value, operation_number FROM {table_name} \ + WHERE miniblock_number = $1 \ + ORDER BY operation_number" + )) + .bind(miniblock_number.0 as i64) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let hashed_key = H256::from_slice(row.get("hashed_key")); + let value = H256::from_slice(row.get("value")); + let operation_number: u32 = row.get::<i32, _>("operation_number") as u32; + (hashed_key, value, operation_number) + }) + .collect() + } + + /// Loads the value for the given `hashed_key` at the given `miniblock_number`. + /// Uses the provided DB table. + /// Shouldn't be used in production. + pub async fn get_storage_value_from_table( + &mut self, + hashed_key: H256, + miniblock_number: MiniblockNumber, + table_name: &str, + ) -> H256 { + let query_str = format!( + "SELECT value FROM {table_name} \ + WHERE hashed_key = $1 AND miniblock_number <= $2 \ + ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1", + ); + sqlx::query(&query_str) + .bind(hashed_key.as_bytes()) + .bind(miniblock_number.0 as i64) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| H256::from_slice(row.get("value"))) + .unwrap_or_else(H256::zero) + } + + /// Vacuums `storage_logs` table. + /// Shouldn't be used in production. 
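/// (Note: Postgres `VACUUM` cannot run inside a transaction block, so this helper must be invoked on a direct, non-transactional connection.)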
+ pub async fn vacuum_storage_logs(&mut self) { + sqlx::query!("VACUUM storage_logs") + .execute(self.storage.conn()) + .await + .unwrap(); + } } #[cfg(test)] @@ -474,7 +561,7 @@ mod tests { .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let account = AccountTreeId::new(Address::repeat_byte(1)); @@ -562,7 +649,7 @@ mod tests { .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let account = AccountTreeId::new(Address::repeat_byte(1)); @@ -613,7 +700,7 @@ mod tests { .await; conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let account = AccountTreeId::new(Address::repeat_byte(1)); diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 88cf9308cdf1..2b99000a48ae 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,9 +1,7 @@ use crate::StorageProcessor; use sqlx::types::chrono::Utc; use std::collections::HashSet; -use zksync_types::{ - zk_evm::aux_structures::LogQuery, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, -}; +use zksync_types::{AccountTreeId, Address, L1BatchNumber, LogQuery, StorageKey, H256}; use zksync_utils::u256_to_h256; #[derive(Debug)] @@ -54,11 +52,7 @@ impl StorageLogsDedupDal<'_, '_> { .map(|key| StorageKey::raw_hashed_key(key.address(), key.key()).to_vec()) .collect(); - let last_index = self - .max_set_enumeration_index() - .await - .map(|(last_index, _)| last_index) - .unwrap_or(0); + let last_index = self.max_enumeration_index().await.unwrap_or(0); let indices: Vec<_> = ((last_index + 1)..=(last_index + hashed_keys.len() as u64)) .map(|x| x as i64) .collect(); @@ -97,27 +91,19 @@ impl StorageLogsDedupDal<'_, '_> { .collect() } - pub async fn max_set_enumeration_index(&mut self) -> Option<(u64, L1BatchNumber)> { - sqlx::query!( - "SELECT index, l1_batch_number FROM initial_writes \ - WHERE index IS NOT NULL \ - ORDER BY index DESC LIMIT 1", - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| { - ( - row.index.unwrap() as u64, - L1BatchNumber(row.l1_batch_number as u32), - ) - }) + pub async fn max_enumeration_index(&mut self) -> Option { + sqlx::query!("SELECT MAX(index) as \"max?\" FROM initial_writes",) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .max + .map(|max| max as u64) } pub async fn initial_writes_for_batch( &mut self, l1_batch_number: L1BatchNumber, - ) -> Vec<(H256, Option)> { + ) -> Vec<(H256, u64)> { sqlx::query!( "SELECT hashed_key, index FROM initial_writes \ WHERE l1_batch_number = $1 \ @@ -128,36 +114,10 @@ impl StorageLogsDedupDal<'_, '_> { .await .unwrap() .into_iter() - .map(|row| { - ( - H256::from_slice(&row.hashed_key), - row.index.map(|i| i as u64), - ) - }) + .map(|row| (H256::from_slice(&row.hashed_key), row.index as u64)) .collect() } - pub async fn set_indices_for_initial_writes(&mut self, indexed_keys: &[(H256, u64)]) { - let (hashed_keys, indices): (Vec<_>, Vec<_>) = indexed_keys - .iter() - .map(|(hashed_key, index)| (hashed_key.as_bytes(), *index as i64)) - .unzip(); - sqlx::query!( - 
"UPDATE initial_writes \ - SET index = data_table.index \ - FROM ( \ - SELECT UNNEST($1::bytea[]) as hashed_key, \ - UNNEST($2::bigint[]) as index \ - ) as data_table \ - WHERE initial_writes.hashed_key = data_table.hashed_key", - &hashed_keys as &[&[u8]], - &indices, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - /// Returns `hashed_keys` that are both present in the input and in `initial_writes` table. pub async fn filter_written_slots(&mut self, hashed_keys: &[H256]) -> HashSet { let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect(); @@ -173,15 +133,4 @@ impl StorageLogsDedupDal<'_, '_> { .map(|row| H256::from_slice(&row.hashed_key)) .collect() } - - // Used only for tests. - pub async fn reset_indices(&mut self) { - sqlx::query!( - "UPDATE initial_writes \ - SET index = NULL", - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 09dcf7d853cb..068e117e42ac 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -38,6 +38,9 @@ impl SyncDal<'_, '_> { miniblocks.l2_fair_gas_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.virtual_blocks, + miniblocks.hash, + miniblocks.protocol_version as "protocol_version!", l1_batches.fee_account_address as "fee_account_address?" FROM miniblocks LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 69843c93382a..e0d84e408787 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; use db_test_macro::db_test; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ - block::{L1BatchHeader, MiniblockHeader}, + block::{miniblock_hash, L1BatchHeader, MiniblockHeader}, fee::{Fee, TransactionExecutionMetrics}, helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, @@ -14,7 +14,6 @@ use zksync_types::{ Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, }; -use zksync_utils::miniblock_hash; use crate::blocks_dal::BlocksDal; use crate::connection::ConnectionPool; @@ -35,7 +34,7 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader { MiniblockHeader { number: MiniblockNumber(number), timestamp: 0, - hash: miniblock_hash(MiniblockNumber(number)), + hash: miniblock_hash(MiniblockNumber(number), 0, H256::zero(), H256::zero()), l1_tx_count: 0, l2_tx_count: 0, base_fee_per_gas: 100, @@ -43,6 +42,7 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader { l2_fair_gas_price: 100, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::default()), + virtual_blocks: 1, } } @@ -168,7 +168,7 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; let mut protocol_versions_dal = ProtocolVersionsDal { storage }; protocol_versions_dal - .save_protocol_version(Default::default()) + .save_protocol_version_with_tx(Default::default()) .await; let storage = protocol_versions_dal.storage; @@ -273,7 +273,11 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { let storage = &mut connection_pool.access_test_storage().await; storage .protocol_versions_dal() - 
.save_protocol_version(Default::default()) + .save_protocol_version_with_tx(Default::default()) + .await; + storage + .protocol_versions_dal() + .save_prover_protocol_version(Default::default()) .await; let block_number = 1; let header = L1BatchHeader::new( @@ -330,7 +334,11 @@ async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) { let protocol_version = ProtocolVersion::default(); storage .protocol_versions_dal() - .save_protocol_version(protocol_version) + .save_protocol_version_with_tx(protocol_version) + .await; + storage + .protocol_versions_dal() + .save_prover_protocol_version(Default::default()) .await; let block_number = 1; let header = L1BatchHeader::new( @@ -388,7 +396,11 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: let protocol_version = ProtocolVersion::default(); storage .protocol_versions_dal() - .save_protocol_version(protocol_version) + .save_protocol_version_with_tx(protocol_version) + .await; + storage + .protocol_versions_dal() + .save_prover_protocol_version(Default::default()) .await; let block_number = 1; let header = L1BatchHeader::new( @@ -463,7 +475,11 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: let protocol_version = ProtocolVersion::default(); storage .protocol_versions_dal() - .save_protocol_version(protocol_version) + .save_protocol_version_with_tx(protocol_version) + .await; + storage + .protocol_versions_dal() + .save_prover_protocol_version(Default::default()) .await; let block_number = 1; let header = L1BatchHeader::new( @@ -545,7 +561,11 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec let protocol_version = ProtocolVersion::default(); storage .protocol_versions_dal() - .save_protocol_version(protocol_version) + .save_protocol_version_with_tx(protocol_version) + .await; + storage + .protocol_versions_dal() + .save_prover_protocol_version(Default::default()) .await; let block_number = 1; let header = L1BatchHeader::new( diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 5760c7b84f2c..51498d23f1e6 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -356,7 +356,7 @@ impl TransactionsDal<'_, '_> { panic!("{}", err); } }; - vlog::debug!( + tracing::debug!( "{:?} l2 transaction {:?} to DB. 
init_acc {:?} nonce {:?} returned option {:?}", l2_tx_insertion_result, tx_hash, @@ -997,8 +997,8 @@ impl TransactionsDal<'_, '_> { let from_miniblock = transactions_by_miniblock.first().unwrap().0; let to_miniblock = transactions_by_miniblock.last().unwrap().0; - let timestamps = sqlx::query!( - "SELECT timestamp FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number", + let miniblock_data = sqlx::query!( + "SELECT timestamp, virtual_blocks FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number", from_miniblock.0 as i64, to_miniblock.0 as i64, ) @@ -1006,14 +1006,41 @@ impl TransactionsDal<'_, '_> { .await .unwrap(); + let prev_hashes = sqlx::query!( + "SELECT hash FROM miniblocks \ + WHERE number BETWEEN $1 AND $2 \ + ORDER BY number", + from_miniblock.0 as i64 - 1, + to_miniblock.0 as i64 - 1, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + + assert_eq!( + miniblock_data.len(), + transactions_by_miniblock.len(), + "Not enough miniblock data retrieved" + ); + assert_eq!( + prev_hashes.len(), + transactions_by_miniblock.len(), + "Not enough previous hashes retrieved" + ); + transactions_by_miniblock .into_iter() - .zip(timestamps) - .map(|((number, txs), row)| MiniblockReexecuteData { - number, - timestamp: row.timestamp as u64, - txs, - }) + .zip(miniblock_data) + .zip(prev_hashes) + .map( + |(((number, txs), miniblock_data_row), prev_hash_row)| MiniblockReexecuteData { + number, + timestamp: miniblock_data_row.timestamp as u64, + prev_block_hash: H256::from_slice(&prev_hash_row.hash), + virtual_blocks: miniblock_data_row.virtual_blocks as u32, + txs, + }, + ) .collect() } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index a56f93d93653..776b9b57f37b 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -94,7 +94,7 @@ impl TransactionsWeb3Dal<'_, '_> { }) // For better compatibility with various clients, we never return null. .or_else(|| Some(Address::default())), - cumulative_gas_used: Default::default(), + cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). 
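// `gas_used` below is not stored directly; it is reconstructed from the persisted `gas_limit` and `refunded_gas` columns.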
gas_used: { let refunded_gas: U256 = db_row.refunded_gas.into(); db_row.gas_limit.map(|val| { @@ -354,8 +354,9 @@ impl TransactionsWeb3Dal<'_, '_> { #[cfg(test)] mod tests { use db_test_macro::db_test; - use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx, ProtocolVersion}; - use zksync_utils::miniblock_hash; + use zksync_types::{ + block::miniblock_hash, fee::TransactionExecutionMetrics, l2::L2Tx, ProtocolVersion, + }; use super::*; use crate::{ @@ -387,7 +388,7 @@ mod tests { async fn getting_transaction(connection_pool: ConnectionPool) { let mut conn = connection_pool.access_test_storage().await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let tx = mock_l2_transaction(); let tx_hash = tx.hash(); @@ -396,7 +397,12 @@ mod tests { let block_ids = [ api::BlockId::Number(api::BlockNumber::Latest), api::BlockId::Number(api::BlockNumber::Number(1.into())), - api::BlockId::Hash(miniblock_hash(MiniblockNumber(1))), + api::BlockId::Hash(miniblock_hash( + MiniblockNumber(1), + 0, + H256::zero(), + H256::zero(), + )), ]; let transaction_ids = block_ids .iter() @@ -447,7 +453,7 @@ mod tests { async fn getting_miniblock_transactions(connection_pool: ConnectionPool) { let mut conn = connection_pool.access_test_storage().await; conn.protocol_versions_dal() - .save_protocol_version(ProtocolVersion::default()) + .save_protocol_version_with_tx(ProtocolVersion::default()) .await; let tx = mock_l2_transaction(); let tx_hash = tx.hash(); diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index a672f8495511..02e1fec1ac09 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -478,6 +478,7 @@ impl WitnessGeneratorDal<'_, '_> { VALUES ($1, $2, $3, $4, $5, $6, $7, 'waiting_for_proofs', now(), now()) ", block_number.0 as i64, + // TODO(SMA-1476): remove the below columns once blob is migrated to GCS. vec![], vec![], basic_circuits_blob_url, @@ -509,6 +510,7 @@ impl WitnessGeneratorDal<'_, '_> { VALUES ($1, $2, $3, $4, 'waiting_for_artifacts', now(), now()) ", block_number.0 as i64, + // TODO(SMA-1476): remove the below column once blob is migrated to GCS. vec![], scheduler_witness_blob_url, protocol_version, @@ -716,15 +718,22 @@ impl WitnessGeneratorDal<'_, '_> { .collect()) } - pub async fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { + pub async fn save_witness_inputs( + &mut self, + block_number: L1BatchNumber, + object_key: &str, + protocol_version: Option, + ) { { sqlx::query!( - "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, created_at, updated_at) \ - VALUES ($1, $2, $3, 'queued', now(), now()) + "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, protocol_version, created_at, updated_at) \ + VALUES ($1, $2, $3, 'queued', $4, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING", block_number.0 as i64, + // TODO(SMA-1476): remove the below column once blob is migrated to GCS. 
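// The empty value below is a placeholder for the legacy `merkle_tree_paths` column; the actual witness input lives in the object store, referenced via `merkel_tree_paths_blob_url` (`object_key`).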
vec![], object_key, + protocol_version.map(|v| v as i32), ) .fetch_optional(self.storage.conn()) .await diff --git a/core/lib/eth_client/Cargo.toml b/core/lib/eth_client/Cargo.toml index 58af159aeda2..68ce89d18fdf 100644 --- a/core/lib/eth_client/Cargo.toml +++ b/core/lib/eth_client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_eth_client" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" @@ -10,18 +10,17 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zksync_types = { path = "../types", version = "1.0" } -zksync_eth_signer = { path = "../eth_signer", version = "1.0" } -zksync_config = { path = "../config", version = "1.0" } -zksync_contracts = { path = "../contracts", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_types = { path = "../types" } +zksync_eth_signer = { path = "../eth_signer" } +zksync_config = { path = "../config" } +zksync_contracts = { path = "../contracts" } jsonrpc-core = "18" serde = "1.0.90" hex = "0.4" - anyhow = "1.0" -metrics = "0.20" +metrics = "0.21" thiserror = "1" tokio = { version = "1", features = ["full"] } async-trait = "0.1" +tracing = "0.1" diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 4e137f3da063..3df467844a0e 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -289,6 +289,10 @@ impl EthInterface for QueryClient { Ok(logs) } + // TODO (PLA-333): at the moment the latest version of the `web3` crate doesn't have a `Finalized` variant in `BlockNumber`. + // However, it's already added in the GitHub repo and will probably be included in the next released version. + // The scope of PLA-333 includes forking/using the crate directly from GitHub; after that we will be able to change + // the type of `block_id` from `String` to `BlockId` and use `self.web3.eth().block(block_id)`. async fn block( &self, block_id: String, diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index f014abd78655..f6256d0b8434 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -55,7 +55,7 @@ impl PKSigningClient { let operator_address = PackedEthSignature::address_from_private_key(&operator_private_key) .expect("Failed to get address from private key"); - vlog::info!("Operator address: {:?}", operator_address); + tracing::info!("Operator address: {:?}", operator_address); SigningClient::new( transport, @@ -281,7 +281,7 @@ impl BoundEthInterface for SigningClient { // Verbosity level is set to `error`, since we expect all the transactions to have // a set limit, but don't want to crash the application if for some reason in some // place limit was not set. 
- vlog::error!( + tracing::error!( "No gas limit was set for transaction, using the default limit: {}", FALLBACK_GAS_LIMIT ); diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index 46722ed4ee7e..c4efb0a7ea82 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_eth_signer" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" @@ -10,7 +10,7 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zksync_types = { path = "../types", version = "1.0" } +zksync_types = { path = "../types" } serde = "1.0.90" serde_derive = "1.0.90" @@ -18,6 +18,7 @@ serde_json = "1.0.0" hex = "0.4.2" secp256k1 = "0.27.0" +# TODO (PLA-440): remove parity-crypto parity-crypto = { version = "0.9", features = ["publickey"] } rlp = "0.5" @@ -32,4 +33,3 @@ actix-rt = "2" tokio = { version = "1", features = ["full"] } actix-web = "4.0.0-beta.8" futures = "0.3" - diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml index 7c74a7d53bcf..43c2491c1682 100644 --- a/core/lib/health_check/Cargo.toml +++ b/core/lib/health_check/Cargo.toml @@ -15,6 +15,7 @@ futures = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tokio = { version = "1", features = ["sync"] } +tracing = "0.1" [dev-dependencies] assert_matches = "1.5.0" diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs index d121b7a189b9..fac8ec46dbbb 100644 --- a/core/lib/health_check/src/lib.rs +++ b/core/lib/health_check/src/lib.rs @@ -39,7 +39,7 @@ impl HealthStatus { } /// Health of a single component. -#[derive(Debug, Clone, Serialize)] +#[derive(Debug, Clone, Serialize, PartialEq)] pub struct Health { status: HealthStatus, /// Component-specific details allowing to assess whether the component is healthy or not. @@ -158,9 +158,16 @@ pub struct HealthUpdater { } impl HealthUpdater { - /// Updates the health check information. - pub fn update(&self, health: Health) { - self.health_sender.send_replace(health); + /// Updates the health check information, returning whether it differs from the previous state. + /// Note that a change of the `Health` description counts as a change, even if the status stays the same; + /// e.g., `Health { Ready, None }` to `Health { Ready, Some(_) }` is considered a change. + pub fn update(&self, health: Health) -> bool { + let old_health = self.health_sender.send_replace(health.clone()); + if old_health != health { + tracing::debug!("changed health from {:?} to {:?}", old_health, health); + return true; + } + false + } /// Creates a [`ReactiveHealthCheck`] attached to this updater. 
This allows not retaining the initial health check @@ -226,4 +233,28 @@ mod tests { HealthStatus::Panicked ); } + + #[tokio::test] + async fn updating_health_status_return_value() { + let (health_check, health_updater) = ReactiveHealthCheck::new("test"); + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::NotReady + ); + + let updated = health_updater.update(HealthStatus::Ready.into()); + assert!(updated); + assert_matches!( + health_check.check_health().await.status(), + HealthStatus::Ready + ); + + let updated = health_updater.update(HealthStatus::Ready.into()); + assert!(!updated); + + let health: Health = HealthStatus::Ready.into(); + let health = health.with_details("new details are treated as status change"); + let updated = health_updater.update(health); + assert!(updated); + } } diff --git a/core/lib/mempool/Cargo.toml b/core/lib/mempool/Cargo.toml index 89a16c794978..a8fca9705568 100644 --- a/core/lib/mempool/Cargo.toml +++ b/core/lib/mempool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_mempool" -version = "1.0.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -10,6 +10,6 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zksync_types = { path = "../types", version = "1.0" } -vlog = { path = "../../lib/vlog", version = "1.0" } -metrics = "0.20" +zksync_types = { path = "../types" } +tracing = "0.1" +metrics = "0.21" diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index b14e90c72eb2..00b552ed5284 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -55,10 +55,11 @@ impl MempoolStore { common_data, execute, received_timestamp_ms, + raw_bytes, } = transaction; match common_data { ExecuteTransactionCommon::L1(data) => { - vlog::trace!("inserting L1 transaction {}", data.serial_id); + tracing::trace!("inserting L1 transaction {}", data.serial_id); self.l1_transactions.insert( data.serial_id, L1Tx { @@ -69,12 +70,13 @@ impl MempoolStore { ); } ExecuteTransactionCommon::L2(data) => { - vlog::trace!("inserting L2 transaction {}", data.nonce); + tracing::trace!("inserting L2 transaction {}", data.nonce); self.insert_l2_transaction( L2Tx { execute, common_data: data, received_timestamp_ms, + raw_bytes, }, &initial_nonces, ); diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index ddb5ae6bc323..70645a37bacf 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -407,6 +407,7 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { common_data: ExecuteTransactionCommon::L1(op_data), execute, received_timestamp_ms: 0, + raw_bytes: None, } } diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 8f05a63c3de0..07f7b67f71e7 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_merkle_tree" -version = "1.0.0" +version = "0.1.0" edition = "2021" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -10,19 +10,19 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -vlog = { path = "../vlog", version = "1.0" } -zksync_types = { path = "../types", version = "1.0" } -zksync_crypto = { path = "../crypto", version = "1.0" } -zksync_storage = { path = "../storage", version = "1.0", default-features = false } +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = 
"856eedd0a36a2ff2c8d965b0f0186d4bb8465d8c" } +zksync_types = { path = "../types" } +zksync_crypto = { path = "../crypto" } +zksync_storage = { path = "../storage", default-features = false } leb128 = "0.2.5" -metrics = "0.20.1" once_cell = "1.17.1" rayon = "1.3.1" thiserror = "1.0" +tracing = "0.1" [dev-dependencies] -zksync_config = { path = "../config", version = "1.0" } +zksync_config = { path = "../config" } assert_matches = "1.5.0" clap = { version = "4.2.2", features = ["derive"] } diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs index 10911229d8d9..5ee7358d6d36 100644 --- a/core/lib/merkle_tree/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -8,10 +8,11 @@ use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use tempfile::TempDir; use std::{ - thread, + io, thread, time::{Duration, Instant}, }; +use vise::Registry; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeInstruction, @@ -20,9 +21,8 @@ use zksync_storage::RocksDB; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; mod batch; -mod recorder; -use crate::{batch::WithBatching, recorder::PrintingRecorder}; +use crate::batch::WithBatching; /// CLI for load-testing for the Merkle tree implementation. #[derive(Debug, Parser)] @@ -69,7 +69,7 @@ struct Cli { impl Cli { fn run(self) { println!("Launched with options: {self:?}"); - PrintingRecorder::install(); + let registry = Registry::collect(); let (mut mock_db, mut rocksdb); let mut _temp_dir = None; @@ -146,6 +146,8 @@ impl Cli { }; let elapsed = start.elapsed(); println!("Processed block #{version} in {elapsed:?}, root hash = {root_hash:?}"); + + registry.encode(&mut io::stdout().lock()).unwrap(); } println!("Verifying tree consistency..."); diff --git a/core/lib/merkle_tree/examples/loadtest/recorder.rs b/core/lib/merkle_tree/examples/loadtest/recorder.rs deleted file mode 100644 index 1c86fac6e1fc..000000000000 --- a/core/lib/merkle_tree/examples/loadtest/recorder.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! Simple `metrics::Recorder` implementation that prints information to stdout. - -use metrics::{ - Counter, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Label, Recorder, SharedString, - Unit, -}; - -use std::{ - collections::HashMap, - fmt::{self, Write as _}, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, Mutex, - }, -}; - -type SharedMetadata = Mutex>; - -#[derive(Debug, Clone, Copy)] -enum MetricKind { - Gauge, - Histogram, -} - -impl fmt::Display for MetricKind { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str(match self { - Self::Gauge => "gauge", - Self::Histogram => "histogram", - }) - } -} - -#[derive(Debug)] -struct PrintingMetric { - kind: MetricKind, - key: KeyName, - labels: Vec