From 3b2fb2841ab43f628b14a6570f992b47a0dbb35e Mon Sep 17 00:00:00 2001
From: Shahar Kaminsky
Date: Sun, 16 Jul 2023 19:06:40 +0300
Subject: [PATCH] Updating mirrors. Need to update Cargo.lock and submodules.

---
 CODEOWNERS | 5 +-
 Cargo.lock | 1980 ++-
 Cargo.toml | 12 +-
 README.md | 2 +-
 bors.toml | 10 +-
 core/CHANGELOG.md | 388 +
 core/bin/admin-tools/Cargo.toml | 2 +-
 core/bin/admin-tools/src/application.rs | 2 +-
 core/bin/admin-tools/src/blocks.rs | 26 +-
 core/bin/admin-tools/src/prover.rs | 15 +-
 core/bin/circuit_synthesizer/Cargo.lock | 1634 ++-
 core/bin/circuit_synthesizer/Cargo.toml | 8 +-
 .../src/circuit_synthesizer.rs | 68 +-
 core/bin/circuit_synthesizer/src/main.rs | 45 +-
 core/bin/contract-verifier/Cargo.toml | 3 +-
 core/bin/contract-verifier/src/error.rs | 10 +-
 core/bin/contract-verifier/src/main.rs | 112 +-
 core/bin/contract-verifier/src/verifier.rs | 185 +-
 .../bin/contract-verifier/src/zksolc_utils.rs | 31 +-
 .../contract-verifier/src/zkvyper_utils.rs | 72 +
 core/bin/external_node/Cargo.toml | 13 +-
 core/bin/external_node/src/config.rs | 375 +
 core/bin/external_node/src/main.rs | 254 +-
 .../Cargo.toml | 12 +
 .../src/main.rs | 63 +
 core/bin/prover/Cargo.lock | 1821 +---
 core/bin/prover/Cargo.toml | 11 +-
 core/bin/prover/src/main.rs | 113 +-
 core/bin/prover/src/prover.rs | 85 +-
 core/bin/prover/src/socket_listener.rs | 63 +-
 .../src/synthesized_circuit_provider.rs | 26 +-
 core/bin/prover_fri/Cargo.toml | 27 +
 core/bin/prover_fri/src/main.rs | 128 +
 .../prover_fri/src/prover_job_processor.rs | 399 +
 .../setup_key_generator_and_server/Cargo.lock | 1044 +-
 .../setup_key_generator_and_server/Cargo.toml | 10 +-
 .../bin/system-constants-generator/Cargo.toml | 6 +-
 .../system-constants-generator/src/main.rs | 4 +-
 .../system-constants-generator/src/utils.rs | 44 +-
 core/bin/test_node/Cargo.toml | 40 +
 core/bin/test_node/README.md | 116 +
 core/bin/test_node/src/fork.rs | 301 +
 core/bin/test_node/src/main.rs | 184 +
 core/bin/test_node/src/node.rs | 657 ++
 core/bin/test_node/src/utils.rs | 16 +
 core/bin/test_node/src/zks.rs | 173 +
 .../Cargo.toml | 32 +
 .../README.md | 5 +
 .../data/verification_basic_10_key.json | 257 +
 .../data/verification_basic_11_key.json | 257 +
 .../data/verification_basic_12_key.json | 257 +
 .../data/verification_basic_13_key.json | 244 +
 .../data/verification_basic_1_key.json | 283 +
 .../data/verification_basic_2_key.json | 257 +
 .../data/verification_basic_3_key.json | 244 +
 .../data/verification_basic_4_key.json | 257 +
 .../data/verification_basic_5_key.json | 244 +
 .../data/verification_basic_6_key.json | 244 +
 .../data/verification_basic_7_key.json | 257 +
 .../data/verification_basic_8_key.json | 257 +
 .../data/verification_basic_9_key.json | 257 +
 .../data/verification_leaf_10_key.json | 262 +
 .../data/verification_leaf_11_key.json | 262 +
 .../data/verification_leaf_12_key.json | 262 +
 .../data/verification_leaf_13_key.json | 262 +
 .../data/verification_leaf_14_key.json | 262 +
 .../data/verification_leaf_15_key.json | 262 +
 .../data/verification_leaf_1_key.json | 270 +
 .../data/verification_leaf_2_key.json | 262 +
 .../data/verification_leaf_3_key.json | 262 +
 .../data/verification_leaf_4_key.json | 262 +
 .../data/verification_leaf_5_key.json | 262 +
 .../data/verification_leaf_6_key.json | 262 +
 .../data/verification_leaf_7_key.json | 262 +
 .../data/verification_leaf_8_key.json | 262 +
 .../data/verification_leaf_9_key.json | 262 +
 .../data/verification_node_key.json | 262 +
 .../data/verification_scheduler_key.json | 270 +
 .../data/witness_artifacts.json | 22 +
 .../src/in_memory_setup_data_source.rs | 320 +
 .../src/lib.rs | 216 +
 .../src/main.rs | 54 +
 .../src/setup_data_generator.rs | 137 +
 .../src/utils.rs | 322 +
 .../src/vk_generator.rs | 48 +
 core/bin/witness_generator/Cargo.toml | 20 +-
 core/bin/witness_generator/README.md | 2 +-
 .../bin/witness_generator/rust-toolchain.toml | 2 +
 .../witness_generator/src/basic_circuits.rs | 585 +-
 .../witness_generator/src/leaf_aggregation.rs | 420 +-
 core/bin/witness_generator/src/main.rs | 104 +-
 .../witness_generator/src/node_aggregation.rs | 510 +-
 ...=> precalculated_merkle_paths_provider.rs} | 85 +-
 core/bin/witness_generator/src/scheduler.rs | 417 +-
 core/bin/witness_generator/src/utils.rs | 290 +-
 core/bin/zksync_core/Cargo.toml | 15 +-
 .../src/api_server/execution_sandbox.rs | 815 --
 .../src/api_server/execution_sandbox/apply.rs | 204 +
 .../src/api_server/execution_sandbox/error.rs | 83 +
 .../api_server/execution_sandbox/execute.rs | 208 +
 .../src/api_server/execution_sandbox/mod.rs | 253 +
 .../api_server/execution_sandbox/validate.rs | 142 +
 .../execution_sandbox/vm_metrics.rs | 191 +
 .../src/api_server/explorer/api_decl.rs | 10 +-
 .../src/api_server/explorer/api_impl.rs | 145 +-
 .../src/api_server/explorer/mod.rs | 2 +-
 .../src/api_server/explorer/network_stats.rs | 5 +-
 .../zksync_core/src/api_server/healthcheck.rs | 23 +-
 .../src/api_server/tx_sender/mod.rs | 682 +-
 .../src/api_server/tx_sender/proxy.rs | 78 +-
 .../src/api_server/web3/api_health_check.rs | 26 +
 .../api_server/web3/backend_jsonrpc/error.rs | 3 +-
 .../web3/backend_jsonrpc/namespaces/debug.rs | 87 +-
 .../web3/backend_jsonrpc/namespaces/en.rs | 40 +
 .../web3/backend_jsonrpc/namespaces/eth.rs | 502 +-
 .../web3/backend_jsonrpc/namespaces/mod.rs | 1 +
 .../web3/backend_jsonrpc/namespaces/zks.rs | 313 +-
 .../web3/backend_jsonrpc/pub_sub.rs | 18 +-
 .../api_server/web3/backend_jsonrpsee/mod.rs | 36 +
 .../backend_jsonrpsee/namespaces/debug.rs | 50 +
 .../web3/backend_jsonrpsee/namespaces/en.rs | 23 +
 .../web3/backend_jsonrpsee/namespaces/eth.rs | 159 +-
 .../web3/backend_jsonrpsee/namespaces/mod.rs | 2 +
 .../web3/backend_jsonrpsee/namespaces/net.rs | 3 +-
 .../web3/backend_jsonrpsee/namespaces/web3.rs | 3 +-
 .../web3/backend_jsonrpsee/namespaces/zks.rs | 112 +-
 .../zksync_core/src/api_server/web3/mod.rs | 263 +-
 .../src/api_server/web3/namespaces/debug.rs | 121 +-
 .../src/api_server/web3/namespaces/en.rs | 50 +
 .../src/api_server/web3/namespaces/eth.rs | 489 +-
 .../web3/namespaces/eth_subscribe.rs | 96 +-
 .../src/api_server/web3/namespaces/mod.rs | 26 +-
 .../src/api_server/web3/namespaces/zks.rs | 339 +-
 .../src/api_server/web3/pubsub_notifier.rs | 71 +-
 .../zksync_core/src/api_server/web3/state.rs | 155 +-
 .../bin/zksync_core/src/bin/block_reverter.rs | 30 +-
 .../bin/merkle_tree_consistency_checker.rs | 92 +-
 core/bin/zksync_core/src/bin/rocksdb_util.rs | 27 +-
 .../src/bin/verified_sources_fetcher.rs | 20 +-
 core/bin/zksync_core/src/bin/zksync_server.rs | 42 +-
 .../bin/zksync_core/src/block_reverter/mod.rs | 331 +-
 .../src/consistency_checker/mod.rs | 40 +-
 core/bin/zksync_core/src/data_fetchers/mod.rs | 7 +-
 .../src/data_fetchers/token_list/mock.rs | 6 +-
 .../src/data_fetchers/token_list/mod.rs | 35 +-
 .../src/data_fetchers/token_price/mod.rs | 27 +-
 .../data_fetchers/token_trading_volume/mod.rs | 29 +-
 .../zksync_core/src/eth_sender/aggregator.rs | 141 +-
 .../src/eth_sender/block_publish_criterion.rs | 5 +-
 .../src/eth_sender/eth_tx_aggregator.rs | 82 +-
 .../src/eth_sender/eth_tx_manager.rs | 295 +-
 .../src/eth_sender/grafana_metrics.rs | 11 +-
 core/bin/zksync_core/src/eth_sender/tests.rs | 381 +-
 core/bin/zksync_core/src/eth_watch/client.rs | 71 +-
 core/bin/zksync_core/src/eth_watch/mod.rs | 112 +-
 core/bin/zksync_core/src/eth_watch/tests.rs | 72 +-
 core/bin/zksync_core/src/fee_monitor/mod.rs | 209 -
 core/bin/zksync_core/src/fee_ticker/mod.rs | 14 +-
 core/bin/zksync_core/src/genesis.rs | 139 +-
 .../src/house_keeper/blocks_state_reporter.rs | 78 +-
 .../fri_prover_job_retry_manager.rs | 54 +
 .../house_keeper/fri_prover_queue_monitor.rs | 63 +
 .../fri_scheduler_circuit_queuer.rs | 57 +
 ...ri_witness_generator_jobs_retry_manager.rs | 112 +
 .../fri_witness_generator_queue_monitor.rs | 119 +
 .../src/house_keeper/gcs_blob_cleaner.rs | 107 +-
 .../house_keeper/gpu_prover_queue_monitor.rs | 21 +-
 core/bin/zksync_core/src/house_keeper/mod.rs | 6 +
 .../src/house_keeper/periodic_job.rs | 8 +-
 .../house_keeper/prover_job_retry_manager.rs | 22 +-
 .../src/house_keeper/prover_queue_monitor.rs | 18 +-
 ...waiting_to_queued_fri_witness_job_mover.rs | 88 +
 .../waiting_to_queued_witness_job_mover.rs | 39 +-
 .../witness_generator_queue_monitor.rs | 28 +-
 .../src/l1_gas_price/gas_adjuster/tests.rs | 1 +
 .../src/l1_gas_price/main_node_fetcher.rs | 2 +
 core/bin/zksync_core/src/l1_gas_price/mod.rs | 2 +
 .../zksync_core/src/l1_gas_price/singleton.rs | 47 +
 core/bin/zksync_core/src/lib.rs | 962 +-
 .../src/metadata_calculator/healthcheck.rs | 4 +-
 .../src/metadata_calculator/helpers.rs | 165 +-
 .../src/metadata_calculator/metrics.rs | 57 +-
 .../src/metadata_calculator/mod.rs | 245 +-
 .../src/metadata_calculator/tests.rs | 387 +-
 .../src/metadata_calculator/updater.rs | 256 +-
 .../bin/zksync_core/src/reorg_detector/mod.rs | 13 +-
 .../src/state_keeper/batch_executor/mod.rs | 224 +-
 .../state_keeper/batch_executor/tests/mod.rs | 229 +-
 .../batch_executor/tests/tester.rs | 62 +-
 .../src/state_keeper/extractors.rs | 272 +-
 .../zksync_core/src/state_keeper/io/common.rs | 82 +-
 .../src/state_keeper/io/mempool.rs | 327 +-
 .../zksync_core/src/state_keeper/io/mod.rs | 244 +-
 .../src/state_keeper/io/seal_logic.rs | 1080 +-
 .../src/state_keeper/io/tests/mod.rs | 401 +-
 .../src/state_keeper/io/tests/tester.rs | 157 +-
 .../zksync_core/src/state_keeper/keeper.rs | 279 +-
 .../src/state_keeper/mempool_actor.rs | 35 +-
 core/bin/zksync_core/src/state_keeper/mod.rs | 83 +-
 .../seal_criteria/conditional_sealer.rs | 124 +-
 .../seal_criteria/criteria/function.rs | 67 -
 .../seal_criteria/criteria/gas.rs | 208 +-
 .../criteria/geometry_seal_criteria.rs | 116 +-
 .../seal_criteria/criteria/mod.rs | 22 +-
 .../seal_criteria/criteria/pubdata_bytes.rs | 124 +-
 .../seal_criteria/criteria/slots.rs | 42 +-
 .../seal_criteria/criteria/timeout.rs | 107 -
 .../criteria/tx_encoding_size.rs | 112 +-
 .../src/state_keeper/seal_criteria/mod.rs | 250 +-
 .../zksync_core/src/state_keeper/tests/mod.rs | 296 +-
 .../src/state_keeper/tests/tester.rs | 150 +-
 .../bin/zksync_core/src/state_keeper/types.rs | 18 +-
 .../state_keeper/updates/l1_batch_updates.rs | 58 +-
 .../state_keeper/updates/miniblock_updates.rs | 137 +-
 .../src/state_keeper/updates/mod.rs | 112 +-
 .../src/sync_layer/batch_status_updater.rs | 365 +-
 .../src/sync_layer/cached_main_node_client.rs | 116 +-
 .../zksync_core/src/sync_layer/external_io.rs | 229 +-
 .../bin/zksync_core/src/sync_layer/fetcher.rs | 286 +-
 .../bin/zksync_core/src/sync_layer/genesis.rs | 16 +-
 .../src/sync_layer/mock_batch_executor.rs | 107 -
 core/bin/zksync_core/src/sync_layer/mod.rs | 1 -
 .../zksync_core/src/sync_layer/sync_action.rs | 150 +-
 .../zksync_core/src/sync_layer/sync_state.rs | 6 +-
 .../src/witness_generator/basic_circuits.rs | 304 +-
 .../src/witness_generator/leaf_aggregation.rs | 108 +-
 .../src/witness_generator/node_aggregation.rs | 102 +-
 .../src/witness_generator/scheduler.rs | 115 +-
 .../src/witness_generator/utils.rs | 33 +-
 core/lib/basic_types/Cargo.toml | 2 +-
 core/lib/circuit_breaker/Cargo.toml | 2 +-
 .../circuit_breaker/src/facet_selectors.rs | 29 +-
 core/lib/circuit_breaker/src/l1_txs.rs | 4 +-
 core/lib/circuit_breaker/src/tests/mod.rs | 23 +-
 core/lib/circuit_breaker/src/vks.rs | 27 +-
 core/lib/config/Cargo.toml | 2 +-
 core/lib/config/src/configs/alerts.rs | 43 +
 core/lib/config/src/configs/api.rs | 88 +-
 core/lib/config/src/configs/chain.rs | 71 +-
 .../config/src/configs/circuit_synthesizer.rs | 4 +-
 .../config/src/configs/contract_verifier.rs | 4 +-
 core/lib/config/src/configs/contracts.rs | 10 +-
 core/lib/config/src/configs/database.rs | 58 +-
 core/lib/config/src/configs/eth_client.rs | 4 +-
 core/lib/config/src/configs/eth_sender.rs | 53 +-
 core/lib/config/src/configs/eth_watch.rs | 10 +-
 core/lib/config/src/configs/fetcher.rs | 10 +-
 core/lib/config/src/configs/fri_prover.rs | 69 +
 .../config/src/configs/fri_prover_group.rs | 438 +
 .../src/configs/fri_witness_generator.rs | 73 +
 core/lib/config/src/configs/house_keeper.rs | 19 +-
 core/lib/config/src/configs/mod.rs | 38 +-
 core/lib/config/src/configs/nfs.rs | 4 +-
 core/lib/config/src/configs/object_store.rs | 28 +-
 core/lib/config/src/configs/prover.rs | 21 +-
 core/lib/config/src/configs/prover_group.rs | 4 +-
 core/lib/config/src/configs/utils.rs | 8 +-
 .../config/src/configs/witness_generator.rs | 4 +-
 core/lib/config/src/constants/blocks.rs | 2 +-
 core/lib/config/src/lib.rs | 37 -
 core/lib/contracts/Cargo.toml | 5 +-
 core/lib/contracts/src/lib.rs | 34 +-
 core/lib/crypto/Cargo.toml | 2 +-
 core/lib/crypto/src/hasher/blake2.rs | 22 +-
 core/lib/crypto/src/hasher/keccak.rs | 28 +-
 core/lib/crypto/src/hasher/mod.rs | 26 +-
 core/lib/crypto/src/hasher/sha256.rs | 40 +-
 core/lib/dal/Cargo.toml | 13 +-
 core/lib/dal/README.md | 9 +-
 ...8_create_witness_inputs_fri_table.down.sql | 1 +
 ...138_create_witness_inputs_fri_table.up.sql | 13 +
 ...2716_create_prover_jobs_fri_table.down.sql | 1 +
 ...132716_create_prover_jobs_fri_table.up.sql | 18 +
 ...30609133146_drop_contract_sources.down.sql | 9 +
 ...0230609133146_drop_contract_sources.up.sql | 1 +
 ...oreign_keys_post_prover_migration.down.sql | 2 +
 ..._foreign_keys_post_prover_migration.up.sql | 2 +
 ...logs-contract-address-tx-hash-idx.down.sql | 2 +
 ...e-logs-contract-address-tx-hash-idx.up.sql | 2 +
 ...0230614081056_add_missing_indices.down.sql | 17 +
 .../20230614081056_add_missing_indices.up.sql | 17 +
 ...onstraint_from_witness_inputs_fri.down.sql | 2 +
 ..._constraint_from_witness_inputs_fri.up.sql | 2 +
 ...leaf_aggregation_witness_jobs_fri.down.sql | 1 +
 ...d_leaf_aggregation_witness_jobs_fri.up.sql | 16 +
 ...ggregation_witness_jobs_fri_table.down.sql | 1 +
 ..._aggregation_witness_jobs_fri_table.up.sql | 15 +
 ...rcuits_in_leaf_agg_jobs_fri_table.down.sql | 1 +
 ...circuits_in_leaf_agg_jobs_fri_table.up.sql | 2 +
 ...dd_depth_in_prover_jobs_fri_table.down.sql | 1 +
 ..._add_depth_in_prover_jobs_fri_table.up.sql | 1 +
 ...20230626060855_vyper_verification.down.sql | 16 +
 .../20230626060855_vyper_verification.up.sql | 14 +
 ...x_for_leaf_node_prover_fri_tables.down.sql | 3 +
 ...dex_for_leaf_node_prover_fri_tables.up.sql | 3 +
 ...cheduler_dependency_tracker_table.down.sql | 1 +
 ..._scheduler_dependency_tracker_table.up.sql | 20 +
 ..._scheduler_witness_jobs_fri_table.down.sql | 1 +
 ...te_scheduler_witness_jobs_fri_table.up.sql | 11 +
 ..._scheduler_witness_jobs_fri_table.down.sql | 1 +
 ...in_scheduler_witness_jobs_fri_table.up.sql | 1 +
 ...f_column_in_prover_jobs_fri_table.down.sql | 1 +
 ...oof_column_in_prover_jobs_fri_table.up.sql | 1 +
 ...mn_in_node_aggregations_fri_table.down.sql | 1 +
 ...lumn_in_node_aggregations_fri_table.up.sql | 1 +
 ..._to_remove_autoincrement_sequence.down.sql | 29 +
 ...ri_to_remove_autoincrement_sequence.up.sql | 43 +
 ...ces_for_new_prover_related_tables.down.sql | 9 +
 ...dices_for_new_prover_related_tables.up.sql | 52 +
 ...l_drop_proof_from_prover_jobs_fri.down.sql | 2 +
 ...url_drop_proof_from_prover_jobs_fri.up.sql | 2 +
 core/lib/dal/sqlx-data.json | 9293 +++++++++--------
 core/lib/dal/src/blocks_dal.rs | 1695 +--
 core/lib/dal/src/blocks_web3_dal.rs | 798 +-
 core/lib/dal/src/connection/mod.rs | 53 +-
 core/lib/dal/src/connection/test_pool.rs | 12 +-
 core/lib/dal/src/eth_sender_dal.rs | 128 +-
 core/lib/dal/src/events_dal.rs | 474 +-
 core/lib/dal/src/events_web3_dal.rs | 29 +-
 .../src/explorer/contract_verification_dal.rs | 240 +-
 .../dal/src/explorer/explorer_accounts_dal.rs | 50 +-
 .../dal/src/explorer/explorer_blocks_dal.rs | 26 +-
 .../dal/src/explorer/explorer_events_dal.rs | 8 +-
 .../lib/dal/src/explorer/explorer_misc_dal.rs | 29 +-
 .../src/explorer/explorer_transactions_dal.rs | 100 +-
 core/lib/dal/src/explorer/mod.rs | 6 +-
 core/lib/dal/src/fee_monitor_dal.rs | 169 -
 core/lib/dal/src/fri_prover_dal.rs | 298 +
 .../fri_scheduler_dependency_tracker_dal.rs | 114 +
 core/lib/dal/src/fri_witness_generator_dal.rs | 720 ++
 core/lib/dal/src/gpu_prover_queue_dal.rs | 32 +-
 core/lib/dal/src/healthcheck.rs | 5 +-
 core/lib/dal/src/lib.rs | 86 +-
 core/lib/dal/src/macro_utils.rs | 20 +
 core/lib/dal/src/models/mod.rs | 2 +
 core/lib/dal/src/models/storage_block.rs | 69 +-
 core/lib/dal/src/models/storage_eth_tx.rs | 6 +-
 core/lib/dal/src/models/storage_event.rs | 1 +
 core/lib/dal/src/models/storage_sync.rs | 87 +
 .../lib/dal/src/models/storage_transaction.rs | 11 +-
 .../models/storage_verification_request.rs | 48 +
 core/lib/dal/src/prover_dal.rs | 136 +-
 core/lib/dal/src/storage_dal.rs | 409 +-
 core/lib/dal/src/storage_load_dal.rs | 122 -
 core/lib/dal/src/storage_logs_dal.rs | 858 +-
 core/lib/dal/src/storage_logs_dedup_dal.rs | 25 +-
 core/lib/dal/src/storage_web3_dal.rs | 162 +-
 core/lib/dal/src/sync_dal.rs | 80 +
 core/lib/dal/src/tests/mod.rs | 356 +-
 core/lib/dal/src/tokens_dal.rs | 77 +-
 core/lib/dal/src/tokens_web3_dal.rs | 26 +-
 core/lib/dal/src/transactions_dal.rs | 161 +-
 core/lib/dal/src/transactions_web3_dal.rs | 426 +-
 core/lib/dal/src/witness_generator_dal.rs | 452 +-
 core/lib/db_storage_provider/Cargo.toml | 14 -
 core/lib/db_storage_provider/src/lib.rs | 46 -
 core/lib/db_test_macro/src/lib.rs | 46 +-
 core/lib/eth_client/Cargo.toml | 2 +-
 core/lib/eth_client/src/clients/http/query.rs | 22 +-
 .../eth_client/src/clients/http/signing.rs | 34 +-
 core/lib/eth_client/src/clients/mock.rs | 40 +-
 core/lib/eth_client/src/lib.rs | 18 +-
 core/lib/eth_signer/Cargo.toml | 4 +-
 core/lib/eth_signer/src/lib.rs | 2 +-
 core/lib/health_check/Cargo.toml | 5 +-
 core/lib/health_check/src/lib.rs | 6 +-
 core/lib/mempool/Cargo.toml | 4 +-
 core/lib/merkle_tree/Cargo.toml | 37 +-
 core/lib/merkle_tree/README.md | 118 +-
 .../examples/loadtest/main.rs | 44 +-
 .../examples/loadtest/recorder.rs | 53 +-
 .../src/consistency.rs | 30 +-
 .../src/domain.rs | 123 +-
 .../src/errors.rs | 0
 .../src/hasher.rs | 61 +-
 .../merge_join_with_max_predecessor.rs | 351 -
 core/lib/merkle_tree/src/iter_ext/mod.rs | 31 -
 core/lib/merkle_tree/src/lib.rs | 302 +-
 core/lib/merkle_tree/src/metrics.rs | 465 +
 core/lib/merkle_tree/src/patch.rs | 151 -
 core/lib/merkle_tree/src/pruning.rs | 402 +
 core/lib/merkle_tree/src/storage.rs | 212 -
 .../src/storage/database.rs | 353 +-
 .../src/storage/mod.rs | 264 +-
 .../src/storage/patch.rs | 188 +-
 .../src/storage/proofs.rs | 51 +-
 core/lib/merkle_tree/src/storage/rocksdb.rs | 346 +
 .../src/storage/serialization.rs | 0
 .../src/storage/tests.rs | 249 +-
 core/lib/merkle_tree/src/tests.rs | 491 -
 core/lib/merkle_tree/src/tree_config.rs | 92 -
 core/lib/merkle_tree/src/types.rs | 746 +-
 core/lib/merkle_tree/src/utils.rs | 275 +-
 core/lib/merkle_tree/src/zksync_tree.rs | 543 -
 .../tests/integration/common.rs | 0
 .../tests/integration/consistency.rs | 28 +-
 .../tests/integration/domain.rs | 72 +-
 .../tests/integration/main.rs | 0
 .../tests/integration/merkle_tree.rs | 219 +-
 ...ntegration__domain__log-metadata-full.snap | 270 +
 ...tion__domain__log-metadata-list-short.snap | 806 ++
 ...cksdb__db-snapshot-21-chunked-commits.snap | 431 +
 ...ocksdb__db-snapshot-3-chunked-commits.snap | 386 +
 ...ocksdb__db-snapshot-8-chunked-commits.snap | 433 +
 ...on__merkle_tree__rocksdb__db-snapshot.snap | 144 +
 core/lib/merkle_tree2/Cargo.toml | 33 -
 core/lib/merkle_tree2/README.md | 75 -
 core/lib/merkle_tree2/src/lib.rs | 271 -
 core/lib/merkle_tree2/src/types.rs | 646 --
 core/lib/merkle_tree2/src/utils.rs | 255 -
 .../snapshots/log-metadata-full.json | 267 -
 .../snapshots/log-metadata-list-short.json | 952 --
 core/lib/mini_merkle_tree/Cargo.toml | 14 +-
 core/lib/mini_merkle_tree/README.md | 16 +
 core/lib/mini_merkle_tree/benches/tree.rs | 58 +
 core/lib/mini_merkle_tree/src/lib.rs | 353 +-
 core/lib/mini_merkle_tree/src/tests.rs | 177 +
 core/lib/object_store/Cargo.toml | 10 +-
 core/lib/object_store/src/file.rs | 72 +-
 core/lib/object_store/src/gcs.rs | 197 +-
 core/lib/object_store/src/lib.rs | 6 +-
 core/lib/object_store/src/mock.rs | 23 +-
 core/lib/object_store/src/objects.rs | 37 +-
 core/lib/object_store/src/raw.rs | 65 +-
 core/lib/object_store/tests/integration.rs | 27 +-
 core/lib/prometheus_exporter/Cargo.toml | 4 +-
 core/lib/prometheus_exporter/src/lib.rs | 24 +-
 core/lib/prover_utils/Cargo.toml | 6 +-
 core/lib/prover_utils/src/lib.rs | 25 +-
 core/lib/queued_job_processor/Cargo.toml | 4 +-
 core/lib/queued_job_processor/src/lib.rs | 48 +-
 core/lib/state/Cargo.toml | 14 +-
 core/lib/state/README.md | 14 +-
 core/lib/state/src/cache.rs | 68 +
 core/lib/state/src/in_memory.rs | 93 +
 core/lib/state/src/lib.rs | 73 +-
 core/lib/state/src/postgres.rs | 368 +
 core/lib/state/src/rocksdb.rs | 434 +
 core/lib/state/src/secondary_storage.rs | 173 -
 core/lib/state/src/storage_view.rs | 264 +-
 core/lib/state/src/test_utils.rs | 120 +
 core/lib/storage/Cargo.toml | 20 +-
 core/lib/storage/README.md | 34 +-
 core/lib/storage/src/db.rs | 409 +-
 core/lib/storage/src/lib.rs | 2 +-
 core/lib/storage/src/metrics.rs | 102 +
 core/lib/storage/src/util.rs | 21 -
 core/lib/types/Cargo.toml | 24 +-
 core/lib/types/src/api/en.rs | 55 +
 core/lib/types/src/{api.rs => api/mod.rs} | 67 +-
 core/lib/types/src/commitment.rs | 122 +-
 core/lib/types/src/eth_sender.rs | 6 +-
 core/lib/types/src/event.rs | 13 +-
 core/lib/types/src/explorer_api.rs | 87 +-
 core/lib/types/src/fee.rs | 4 +-
 core/lib/types/src/l1/mod.rs | 15 +-
 core/lib/types/src/l2/mod.rs | 12 +-
 core/lib/types/src/l2_to_l1_log.rs | 15 +-
 core/lib/types/src/lib.rs | 18 +-
 core/lib/types/src/proofs.rs | 47 +
 core/lib/types/src/storage/log.rs | 2 +-
 core/lib/types/src/storage/mod.rs | 18 +-
 core/lib/types/src/storage/writes.rs | 32 +-
 core/lib/types/src/transaction_request.rs | 16 +-
 core/lib/types/src/tx/tx_execution_info.rs | 28 +-
 core/lib/utils/Cargo.toml | 7 +-
 core/lib/utils/src/lib.rs | 1 +
 core/lib/utils/src/wait_for_tasks.rs | 60 +
 core/lib/vlog/Cargo.toml | 4 +-
 core/lib/vlog/src/lib.rs | 27 +-
 core/lib/vm/Cargo.toml | 26 +-
 core/lib/vm/fuzz/Cargo.toml | 2 +-
 .../fuzz/fuzz_targets/deploy_transaction.rs | 2 +-
 core/lib/vm/src/event_sink.rs | 38 +-
 core/lib/vm/src/history_recorder.rs | 74 +-
 core/lib/vm/src/lib.rs | 1 -
 core/lib/vm/src/oracle_tools.rs | 17 +-
 core/lib/vm/src/oracles/decommitter.rs | 27 +-
 core/lib/vm/src/oracles/storage.rs | 49 +-
 core/lib/vm/src/oracles/tracer/bootloader.rs | 2 +-
 core/lib/vm/src/oracles/tracer/call.rs | 2 +-
 core/lib/vm/src/oracles/tracer/mod.rs | 2 +-
 core/lib/vm/src/oracles/tracer/one_tx.rs | 2 +-
 .../src/oracles/tracer/transaction_result.rs | 2 +-
 core/lib/vm/src/oracles/tracer/utils.rs | 2 +-
 core/lib/vm/src/oracles/tracer/validation.rs | 40 +-
 core/lib/vm/src/pubdata_utils.rs | 9 +-
 core/lib/vm/src/refunds.rs | 98 -
 core/lib/vm/src/storage.rs | 62 -
 core/lib/vm/src/test_utils.rs | 6 +-
 core/lib/vm/src/tests/bootloader.rs | 752 +-
 core/lib/vm/src/transaction_data.rs | 16 +-
 core/lib/vm/src/utils.rs | 65 +-
 core/lib/vm/src/vm.rs | 56 +-
 core/lib/vm/src/vm_with_bootloader.rs | 2 +-
 core/lib/web3_decl/Cargo.toml | 6 +-
 core/lib/web3_decl/src/error.rs | 2 +
 core/lib/web3_decl/src/lib.rs | 3 +
 core/lib/web3_decl/src/namespaces/debug.rs | 45 +
 core/lib/web3_decl/src/namespaces/en.rs | 23 +
 core/lib/web3_decl/src/namespaces/eth.rs | 73 +-
 core/lib/web3_decl/src/namespaces/mod.rs | 10 +-
 core/lib/web3_decl/src/namespaces/zks.rs | 62 +-
 core/lib/web3_decl/src/types.rs | 4 +-
 .../cross_external_nodes_checker/Cargo.toml | 3 +-
 .../cross_external_nodes_checker/README.md | 38 +-
 .../src/checker.rs | 465 +-
 .../src/config.rs | 121 +-
 .../src/divergence.rs | 88 +
 .../src/helpers.rs | 54 +-
 .../cross_external_nodes_checker/src/main.rs | 46 +-
 .../src/pubsub_checker.rs | 307 +
 core/tests/loadnext/Cargo.toml | 4 +-
 .../src/account/api_request_executor.rs | 55 +-
 .../src/account/explorer_api_executor.rs | 261 +-
 core/tests/loadnext/src/account/mod.rs | 197 +-
 .../loadnext/src/account/pubsub_executor.rs | 113 +-
 .../src/account/tx_command_executor.rs | 76 +-
 core/tests/loadnext/src/config.rs | 73 +-
 core/tests/loadnext/src/constants.rs | 8 +-
 core/tests/loadnext/src/executor.rs | 113 +-
 core/tests/loadnext/src/fs_utils.rs | 2 +-
 core/tests/loadnext/src/main.rs | 25 +-
 core/tests/loadnext/src/report.rs | 20 +-
 .../src/report_collector/metrics_collector.rs | 39 +-
 .../loadnext/src/report_collector/mod.rs | 151 +-
 .../operation_results_collector.rs | 75 +-
 .../tests/revert-and-restart.test.ts | 8 +-
 core/tests/test_account/Cargo.toml | 4 +-
 core/tests/test_account/src/lib.rs | 20 +-
 core/tests/testkit/Cargo.toml | 40 -
 .../testkit/src/commands/gas_price/mod.rs | 224 -
 .../testkit/src/commands/gas_price/types.rs | 177 -
 .../testkit/src/commands/gas_price/utils.rs | 336 -
 core/tests/testkit/src/commands/mod.rs | 4 -
 .../testkit/src/commands/revert_block.rs | 76 -
 .../testkit/src/commands/upgrade_contract.rs | 81 -
 core/tests/testkit/src/commands/utils.rs | 341 -
 core/tests/testkit/src/eth_provider.rs | 673 --
 core/tests/testkit/src/external_commands.rs | 156 -
 core/tests/testkit/src/main.rs | 72 -
 core/tests/testkit/src/server_handler.rs | 43 -
 core/tests/testkit/src/tester.rs | 639 --
 core/tests/testkit/src/types.rs | 316 -
 core/tests/testkit/src/utils.rs | 69 -
 .../contracts/vyper/CreateForwarder.vy | 12 +
 .../contracts/vyper/DeployMe.vy | 18 +
 core/tests/ts-integration/hardhat.config.ts | 13 +-
 core/tests/ts-integration/package.json | 5 +-
 .../ts-integration/scripts/compile-yul.ts | 2 +-
 .../tests/ts-integration/src/context-owner.ts | 2 +-
 core/tests/ts-integration/src/env.ts | 38 +-
 core/tests/ts-integration/src/types.ts | 4 +
 .../ts-integration/tests/api/debug.test.ts | 4 +
 .../ts-integration/tests/api/explorer.test.ts | 56 +-
 .../ts-integration/tests/api/web3.test.ts | 51 +-
 .../ts-integration/tests/contracts.test.ts | 8 +-
 .../tests/custom-account.test.ts | 17 +-
 .../tests/custom-erc20-bridge.test.ts | 5 +-
 core/tests/ts-integration/tests/ether.test.ts | 3 +
 core/tests/ts-integration/tests/fees.test.ts | 4 +-
 core/tests/ts-integration/tests/l1.test.ts | 7 -
 .../ts-integration/tests/l2-weth.test.ts | 181 +
 .../ts-integration/tests/mempool.test.ts | 17 +-
 core/tests/vm-benchmark/benches/iai.rs | 1 +
 .../deployment_benchmarks/event_spam | Bin 0 -> 672 bytes
 core/tests/vm-benchmark/harness/Cargo.toml | 14 +-
 .../harness/src/in_memory_storage.rs | 63 -
 core/tests/vm-benchmark/harness/src/lib.rs | 7 +-
 .../vm-benchmark/src/compare_iai_results.rs | 9 +-
 docker/circuit-synthesizer/Dockerfile | 4 +-
 docker/contract-verifier/Dockerfile | 64 +-
 .../cross-external-nodes-checker/Dockerfile | 21 +
 docker/external-node/Dockerfile | 11 +-
 docker/external-node/entrypoint.sh | 8 +
 docker/prover-fri/Dockerfile | 30 +
 docker/prover-gar/Dockerfile | 18 +-
 docker/prover/Dockerfile | 4 +-
 docker/witness-generator/Dockerfile | 20 +-
 docker/zk-environment/Dockerfile | 2 +-
 docker/zk-rust-nightly-environment/Dockerfile | 4 +-
 docs/advanced/01_initialization.md | 2 +-
 docs/advanced/bytecode_compression.md | 92 +
 docs/advanced/deposit.md | 2 +-
 docs/advanced/zk_intuition.md | 155 +
 docs/development.md | 2 +-
 docs/external-node/04_observability.md | 4 +-
 .../prepared_configs/mainnet-config.env | 2 +-
 .../prepared_configs/testnet-config.env | 2 +-
 docs/setup-dev.md | 2 +-
 .../many-owners-custom-account.sol | 151 +
 .../contracts/estimator/estimator.sol | 1 +
 etc/env/base/alerts.toml | 6 +
 etc/env/base/chain.toml | 4 +-
 etc/env/base/contracts.toml | 3 +
 etc/env/base/database.toml | 9 +-
 etc/env/base/eth_watch.toml | 1 -
 etc/env/base/fri_prover.toml | 9 +
 etc/env/base/fri_prover_group.toml | 14 +
 etc/env/base/fri_witness_generator.toml | 5 +
 etc/env/base/house_keeper.toml | 5 +
 etc/env/base/object_store.toml | 7 +
 etc/env/base/rust.toml | 4 +-
 etc/env/ext-node-docker.toml | 85 +-
 etc/env/ext-node.toml | 83 +-
 etc/lint-config/sol.js | 2 +
 etc/tokens/sepolia.json | 6 +
 infrastructure/zk/src/clean.ts | 8 +-
 infrastructure/zk/src/config.ts | 54 +-
 infrastructure/zk/src/contract.ts | 19 +-
 infrastructure/zk/src/database.ts | 2 -
 infrastructure/zk/src/docker.ts | 10 +-
 infrastructure/zk/src/env.ts | 10 +
 infrastructure/zk/src/init.ts | 2 +-
 infrastructure/zk/src/run/run.ts | 113 +-
 infrastructure/zk/src/server.ts | 29 +-
 infrastructure/zk/src/test/integration.ts | 25 +-
 sdk/zksync-rs/src/operations/withdraw.rs | 1 -
 sdk/zksync-web3.js/CHANGELOG.md | 9 +
 sdk/zksync-web3.js/package.json | 2 +-
 sdk/zksync-web3.js/src/adapters.ts | 80 +-
 sdk/zksync-web3.js/src/provider.ts | 246 +-
 sdk/zksync-web3.js/src/types.ts | 2 +
 sdk/zksync-web3.js/src/utils.ts | 36 +-
 yarn.lock | 72 +-
 625 files changed, 51720 insertions(+), 33100 deletions(-)
 create mode 100644 core/bin/contract-verifier/src/zkvyper_utils.rs
 create mode 100644 core/bin/external_node/src/config.rs
 create mode 100644 core/bin/l1_tx_effective_gas_price_migration/Cargo.toml
 create mode 100644 core/bin/l1_tx_effective_gas_price_migration/src/main.rs
 create mode 100644 core/bin/prover_fri/Cargo.toml
 create mode 100644 core/bin/prover_fri/src/main.rs
 create mode 100644 core/bin/prover_fri/src/prover_job_processor.rs
 create mode 100644 core/bin/test_node/Cargo.toml
 create mode 100644 core/bin/test_node/README.md
 create mode 100644 core/bin/test_node/src/fork.rs
 create mode 100644 core/bin/test_node/src/main.rs
 create mode 100644 core/bin/test_node/src/node.rs
 create mode 100644 core/bin/test_node/src/utils.rs
 create mode 100644 core/bin/test_node/src/zks.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/Cargo.toml
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/README.md
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/lib.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/main.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/utils.rs
 create mode 100644 core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs
 create mode 100644 core/bin/witness_generator/rust-toolchain.toml
 rename core/bin/witness_generator/src/{precalculated/mod.rs => precalculated_merkle_paths_provider.rs} (79%)
 delete mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/error.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs
 create mode 100644 core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs
 create mode 100644 core/bin/zksync_core/src/api_server/web3/api_health_check.rs
 create mode 100644 core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs
 create mode 100644 core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs
 create mode 100644 core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs
 create mode 100644 core/bin/zksync_core/src/api_server/web3/namespaces/en.rs
 delete mode 100644 core/bin/zksync_core/src/fee_monitor/mod.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs
 create mode 100644 core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs
 create mode 100644 core/bin/zksync_core/src/l1_gas_price/singleton.rs
 delete mode 100644 core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs
 delete mode 100644 core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs
 delete mode 100644 core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs
 create mode 100644 core/lib/config/src/configs/alerts.rs
 create mode 100644 core/lib/config/src/configs/fri_prover.rs
 create mode 100644 core/lib/config/src/configs/fri_prover_group.rs
 create mode 100644 core/lib/config/src/configs/fri_witness_generator.rs
 create mode 100644 core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230609133146_drop_contract_sources.down.sql
 create mode 100644 core/lib/dal/migrations/20230609133146_drop_contract_sources.up.sql
 create mode 100644 core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.down.sql
 create mode 100644 core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.up.sql
 create mode 100644 core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.down.sql
 create mode 100644 core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.up.sql
 create mode 100644 core/lib/dal/migrations/20230614081056_add_missing_indices.down.sql
 create mode 100644 core/lib/dal/migrations/20230614081056_add_missing_indices.up.sql
 create mode 100644 core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.down.sql
 create mode 100644 core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.up.sql
 create mode 100644 core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.down.sql
 create mode 100644 core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.up.sql
 create mode 100644 core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230626060855_vyper_verification.down.sql
 create mode 100644 core/lib/dal/migrations/20230626060855_vyper_verification.up.sql
 create mode 100644 core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.down.sql
 create mode 100644 core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.up.sql
 create mode 100644 core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.down.sql
 create mode 100644 core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.up.sql
 create mode 100644 core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.down.sql
 create mode 100644 core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.up.sql
 create mode 100644 core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.down.sql
 create mode 100644 core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.up.sql
 create mode 100644 core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.down.sql
 create mode 100644 core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.up.sql
 delete mode 100644 core/lib/dal/src/fee_monitor_dal.rs
 create mode 100644 core/lib/dal/src/fri_prover_dal.rs
 create mode 100644 core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs
 create mode 100644 core/lib/dal/src/fri_witness_generator_dal.rs
 create mode 100644 core/lib/dal/src/macro_utils.rs
 create mode 100644 core/lib/dal/src/models/storage_sync.rs
 create mode 100644 core/lib/dal/src/models/storage_verification_request.rs
 delete mode 100644 core/lib/dal/src/storage_load_dal.rs
 create mode 100644 core/lib/dal/src/sync_dal.rs
 delete mode 100644 core/lib/db_storage_provider/Cargo.toml
 delete mode 100644 core/lib/db_storage_provider/src/lib.rs
 rename core/lib/{merkle_tree2 => merkle_tree}/examples/loadtest/main.rs (80%)
 rename core/lib/{merkle_tree2 => merkle_tree}/examples/loadtest/recorder.rs (65%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/consistency.rs (93%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/domain.rs (71%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/errors.rs (100%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/hasher.rs (93%)
 delete mode 100644 core/lib/merkle_tree/src/iter_ext/merge_join_with_max_predecessor.rs
 delete mode 100644 core/lib/merkle_tree/src/iter_ext/mod.rs
 create mode 100644 core/lib/merkle_tree/src/metrics.rs
 delete mode 100644 core/lib/merkle_tree/src/patch.rs
 create mode 100644 core/lib/merkle_tree/src/pruning.rs
 delete mode 100644 core/lib/merkle_tree/src/storage.rs
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/database.rs (51%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/mod.rs (65%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/patch.rs (63%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/proofs.rs (94%)
 create mode 100644 core/lib/merkle_tree/src/storage/rocksdb.rs
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/serialization.rs (100%)
 rename core/lib/{merkle_tree2 => merkle_tree}/src/storage/tests.rs (53%)
 delete mode 100644 core/lib/merkle_tree/src/tests.rs
 delete mode 100644 core/lib/merkle_tree/src/tree_config.rs
 delete mode 100644 core/lib/merkle_tree/src/zksync_tree.rs
 rename core/lib/{merkle_tree2 => merkle_tree}/tests/integration/common.rs (100%)
 rename core/lib/{merkle_tree2 => merkle_tree}/tests/integration/consistency.rs (69%)
 rename core/lib/{merkle_tree2 => merkle_tree}/tests/integration/domain.rs (85%)
 rename core/lib/{merkle_tree2 => merkle_tree}/tests/integration/main.rs (100%)
 rename core/lib/{merkle_tree2 => merkle_tree}/tests/integration/merkle_tree.rs (75%)
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__domain__log-metadata-full.snap
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__domain__log-metadata-list-short.snap
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits.snap
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-3-chunked-commits.snap
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits.snap
 create mode 100644 core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot.snap
 delete mode 100644 core/lib/merkle_tree2/Cargo.toml
 delete mode 100644 core/lib/merkle_tree2/README.md
 delete mode 100644 core/lib/merkle_tree2/src/lib.rs
 delete mode 100644 core/lib/merkle_tree2/src/types.rs
 delete mode 100644 core/lib/merkle_tree2/src/utils.rs
 delete mode 100644 core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-full.json
 delete mode 100644 core/lib/merkle_tree2/tests/integration/snapshots/log-metadata-list-short.json
 create mode 100644 core/lib/mini_merkle_tree/README.md
 create mode 100644 core/lib/mini_merkle_tree/benches/tree.rs
 create mode 100644 core/lib/mini_merkle_tree/src/tests.rs
 create mode 100644 core/lib/state/src/cache.rs
 create mode 100644 core/lib/state/src/in_memory.rs
 create mode 100644 core/lib/state/src/postgres.rs
 create mode 100644 core/lib/state/src/rocksdb.rs
 delete mode 100644 core/lib/state/src/secondary_storage.rs
 create mode 100644 core/lib/state/src/test_utils.rs
 create mode 100644 core/lib/storage/src/metrics.rs
 delete mode 100644 core/lib/storage/src/util.rs
 create mode 100644 core/lib/types/src/api/en.rs
 rename core/lib/types/src/{api.rs => api/mod.rs} (89%)
 create mode 100644 core/lib/utils/src/wait_for_tasks.rs
 delete mode 100644 core/lib/vm/src/storage.rs
 create mode 100644 core/lib/web3_decl/src/namespaces/debug.rs
 create mode 100644 core/lib/web3_decl/src/namespaces/en.rs
 create mode 100644 core/tests/cross_external_nodes_checker/src/divergence.rs
 create mode 100644 core/tests/cross_external_nodes_checker/src/pubsub_checker.rs
 delete mode 100644 core/tests/testkit/Cargo.toml
 delete mode 100644 core/tests/testkit/src/commands/gas_price/mod.rs
 delete mode 100644 core/tests/testkit/src/commands/gas_price/types.rs
 delete mode 100644 core/tests/testkit/src/commands/gas_price/utils.rs
 delete mode 100644 core/tests/testkit/src/commands/mod.rs
 delete mode 100644 core/tests/testkit/src/commands/revert_block.rs
 delete mode 100644 core/tests/testkit/src/commands/upgrade_contract.rs
 delete mode 100644 core/tests/testkit/src/commands/utils.rs
 delete mode 100644 core/tests/testkit/src/eth_provider.rs
 delete mode 100644 core/tests/testkit/src/external_commands.rs
 delete mode 100644 core/tests/testkit/src/main.rs
 delete mode 100644 core/tests/testkit/src/server_handler.rs
 delete mode 100644 core/tests/testkit/src/tester.rs
 delete mode 100644 core/tests/testkit/src/types.rs
 delete mode 100644 core/tests/testkit/src/utils.rs
 create mode 100644 core/tests/ts-integration/contracts/vyper/CreateForwarder.vy
 create mode 100644 core/tests/ts-integration/contracts/vyper/DeployMe.vy
 create mode 100644 core/tests/ts-integration/tests/l2-weth.test.ts
 create mode 100644 core/tests/vm-benchmark/deployment_benchmarks/event_spam
 delete mode 100644 core/tests/vm-benchmark/harness/src/in_memory_storage.rs
 create mode 100644 docker/cross-external-nodes-checker/Dockerfile
 create mode 100644 docker/external-node/entrypoint.sh
 create mode 100644 docker/prover-fri/Dockerfile
 create mode 100644 docs/advanced/bytecode_compression.md
 create mode 100644 docs/advanced/zk_intuition.md
 create mode 100644 etc/contracts-test-data/contracts/custom-account/many-owners-custom-account.sol
 create mode 100644 etc/env/base/alerts.toml
 create mode 100644 etc/env/base/fri_prover.toml
 create mode 100644 etc/env/base/fri_prover_group.toml
 create mode 100644 etc/env/base/fri_witness_generator.toml

diff --git a/CODEOWNERS b/CODEOWNERS
index c126897f5de9..12cd26187090 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1 +1,4 @@
-* @RomanBrodetski @perekopskiy
+* @matter-labs/core
+.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc
+**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc
+CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc
diff --git a/Cargo.lock b/Cargo.lock
index d8919ee451ed..5f0b729cec18 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10,19 +10,19 @@ checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
 
 [[package]]
 name = "actix-codec"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57a7559404a7f3573127aab53c08ce37a6c6a315c374a31070f3c91cd1b4a7fe"
+checksum = "617a8268e3537fe1d8c9ead925fca49ef6400927ee7bc26750e90ecee14ce4b8"
 dependencies = [
  "bitflags 1.3.2",
  "bytes 1.4.0",
  "futures-core",
  "futures-sink",
- "log",
  "memchr",
  "pin-project-lite",
  "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.8",
+ "tracing",
 ]
 
 [[package]]
@@ -51,7 +51,7 @@ dependencies = [
  "actix-service",
  "actix-utils",
  "ahash 0.8.3",
- "base64 0.21.0",
+ "base64 0.21.2",
  "bitflags 1.3.2",
  "brotli",
  "bytes 1.4.0",
@@ -74,9 +74,9 @@ dependencies = [
  "sha1",
  "smallvec",
  "tokio",
- "tokio-util 0.7.7",
+ "tokio-util 0.7.8",
  "tracing",
- "zstd",
+ "zstd 0.12.3+zstd.1.5.2",
 ]
 
 [[package]]
@@ -85,7 +85,7 @@ version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6"
 dependencies = [
- "quote 1.0.26",
+ "quote 1.0.28",
  "syn 1.0.109",
 ]
 
@@ -124,7 +124,7 @@ dependencies = [
  "actix-utils",
  "futures-core",
  "futures-util",
- "mio 0.8.6",
+ "mio 0.8.8",
  "num_cpus",
  "socket2",
  "tokio",
@@ -189,7 +189,7 @@ dependencies = [
  "serde_urlencoded",
  "smallvec",
  "socket2",
- "time 0.3.20",
+ "time 0.3.22",
  "url",
 ]
 
@@ -200,8 +200,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9"
 dependencies = [
  "actix-router",
- "proc-macro2 1.0.52",
- "quote 1.0.26",
+ "proc-macro2 1.0.60",
+ "quote 1.0.28",
  "syn 1.0.109",
 ]
 
@@ -237,7 +237,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "chrono",
- "clap 4.2.4",
+ "clap 4.3.4",
  "dotenvy",
  "tokio",
  "zksync_dal",
@@ -293,7 +293,7 @@ version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
 dependencies = [
- "getrandom 0.2.8",
+ "getrandom 0.2.10",
  "once_cell",
  "version_check",
 ]
@@ -305,7 +305,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
 dependencies = [
  "cfg-if 1.0.0",
- "getrandom 0.2.8",
+ "getrandom 0.2.10",
  "once_cell",
  "version_check",
 ]
@@ -319,6 +319,15 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "aho-corasick"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "aliasable"
 version = "0.1.3"
@@ -340,6 +349,12 @@ dependencies = [
  "alloc-no-stdlib",
 ]
 
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
 [[package]]
 name = "android_system_properties"
 version = "0.1.5"
@@ -366,9 +381,9 @@ dependencies = [
 
 [[package]]
 name = "anstream"
-version = "0.3.0"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371"
+checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
 dependencies = [
  "anstyle",
  "anstyle-parse",
@@ -405,9 +420,9 @@ dependencies = [
 
 [[package]]
 name = "anstyle-wincon"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd"
+checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
 dependencies = [
  "anstyle",
  "windows-sys 0.48.0",
 ]
 
@@ -415,9 +430,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.69"
+version = "1.0.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
 
 [[package]]
 name = "arr_macro"
@@ -436,15 +451,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1"
 dependencies = [
  "proc-macro-hack",
- "quote 1.0.26",
+ "quote 1.0.28",
  "syn 1.0.109",
 ]
 
 [[package]]
 name = "arrayref"
-version = "0.3.6"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
 
 [[package]]
 name = "arrayvec"
@@ -463,9 +478,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
 
 [[package]]
 name = "arrayvec"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
+checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c"
 
 [[package]]
 name = "assert_matches"
@@ -474,63 +489,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
 [[package]]
-name = "async-channel"
-version = "1.8.0"
+name = "async-compression"
+version = "0.3.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833"
+checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a"
 dependencies = [
- "concurrent-queue",
- "event-listener",
+ "brotli",
+ "flate2",
  "futures-core",
-]
-
-[[package]]
-name = "async-executor"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b"
-dependencies = [
- "async-lock",
- "async-task",
- "concurrent-queue", - "fastrand", - "futures-lite", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" -dependencies = [ - "async-lock", - "autocfg 1.1.0", - "concurrent-queue", - "futures-lite", - "libc", - "log", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "windows-sys 0.42.0", + "memchr", + "pin-project-lite", + "tokio", + "zstd 0.11.2+zstd.1.5.2", + "zstd-safe 5.0.2+zstd.1.5.2", ] [[package]] @@ -542,68 +513,11 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-native-tls" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9e7a929bd34c68a82d58a4de7f86fffdaf97fb2af850162a7bb19dd7269b33" -dependencies = [ - "async-std", - "native-tls", - "thiserror", - "url", -] - -[[package]] -name = "async-process" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" -dependencies = [ - "async-io", - "async-lock", - "autocfg 1.1.0", - "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "libc", - "signal-hook", - "windows-sys 0.42.0", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils 0.8.15", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -612,30 +526,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] -[[package]] -name = "async-task" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" - [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" 
dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -647,12 +555,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-waker" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" - [[package]] name = "atty" version = "0.2.14" @@ -681,13 +583,13 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backon" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294" +checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9" dependencies = [ - "futures 0.3.27", + "fastrand", + "futures-core", "pin-project", - "rand 0.8.5", "tokio", ] @@ -701,7 +603,7 @@ dependencies = [ "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide", + "miniz_oxide 0.6.2", "object", "rustc-demangle", ] @@ -720,9 +622,15 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" + +[[package]] +name = "base64" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -744,14 +652,14 @@ name = "bellman_ce" version = "0.3.2" source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bit-vec", "blake2s_const", "blake2s_simd", "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.27", + "futures 0.3.28", "hex", "lazy_static", "num_cpus", @@ -796,12 +704,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "regex", "rustc-hash", "shlex", - "syn 2.0.12", + "syn 2.0.18", ] [[package]] @@ -821,9 +729,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.2.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" [[package]] name = "bitvec" @@ -854,7 +762,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -908,7 +816,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ "block-padding 0.2.1", - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -917,7 +825,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ 
-945,20 +853,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" -[[package]] -name = "blocking" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" -dependencies = [ - "async-channel", - "async-lock", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", -] - [[package]] name = "brotli" version = "3.3.4" @@ -982,9 +876,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.3.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "serde", @@ -992,9 +886,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -1008,6 +902,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "bytecount" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" + [[package]] name = "byteorder" version = "1.4.3" @@ -1050,6 +950,37 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "camino" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + [[package]] name = "cast" version = "0.3.0" @@ -1088,13 +1019,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "rustc-serialize", "serde", @@ -1105,9 +1036,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -1116,15 +1047,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" 
+version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -1136,7 +1067,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -1149,9 +1080,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1187,9 +1118,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.4" +version = "4.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ac1f6381d8d82ab4684768f89c0ea3afe66925ceadb4eeb3fc452ffc55d62" +checksum = "80672091db20273a15cf9fdd4e47ed43b5091ec9841bf4c6145c9dfbbcae09ed" dependencies = [ "clap_builder", "clap_derive", @@ -1198,27 +1129,27 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.2.4" +version = "4.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84080e799e54cff944f4b4a4b0e71630b0e0443b25b985175c7dddc1a859b749" +checksum = "c1458a1df40e1e2afebb7ab60ce55c1fa8f431146205aa5f4887e0b111c27636" dependencies = [ "anstream", "anstyle", "bitflags 1.3.2", - "clap_lex 0.4.1", + "clap_lex 0.5.0", "strsim 0.10.0", ] [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 2.0.12", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -1232,9 +1163,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "cloudabi" @@ -1270,16 +1201,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "colorchoice" version = "1.0.0" @@ -1297,20 +1218,17 @@ dependencies = [ ] [[package]] -name = "concurrent-queue" -version = "2.1.0" +name = "console" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = 
"c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ - "crossbeam-utils 0.8.15", + "encode_unicode", + "lazy_static", + "libc", + "windows-sys 0.45.0", ] -[[package]] -name = "const-decoder" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5241cd7938b1b415942e943ea96f615953d500b50347b505b0b507080bad5a6f" - [[package]] name = "const-oid" version = "0.7.1" @@ -1351,7 +1269,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" dependencies = [ "percent-encoding", - "time 0.3.20", + "time 0.3.22", "version_check", ] @@ -1367,15 +1285,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -1447,13 +1365,14 @@ dependencies = [ "anyhow", "ctrlc", "envy", - "futures 0.3.27", + "futures 0.3.28", "prometheus_exporter", "serde", "serde_json", "tokio", "vlog", "zksync_types", + "zksync_utils", "zksync_web3_decl", ] @@ -1478,11 +1397,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.14", + "crossbeam-epoch 0.9.15", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1497,12 +1416,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1523,8 +1442,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1544,14 +1463,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", - "memoffset 0.8.0", + "crossbeam-utils 0.8.16", + "memoffset 0.9.0", "scopeguard", ] @@ -1573,7 +1492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] 
[[package]] @@ -1589,9 +1508,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -1608,7 +1527,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1618,7 +1537,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1630,7 +1549,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "typenum", ] @@ -1640,7 +1559,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1650,7 +1569,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1660,32 +1579,22 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote 1.0.26", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.6.0" @@ -1697,56 +1606,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix", - "windows-sys 0.45.0", -] - -[[package]] -name = "cxx" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" -dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "windows-sys 0.48.0", ] [[package]] @@ -1767,8 +1632,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -1780,7 +1645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -1794,15 +1659,15 @@ dependencies = [ "hashbrown 0.12.3", "lock_api", "once_cell", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] name = "db_test_macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -1843,8 +1708,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -1855,8 +1720,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustc_version", "syn 1.0.109", ] @@ -1876,14 +1741,14 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1918,9 +1783,9 @@ checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" [[package]] name = "dotenvy" -version = "0.15.6" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d8c417d7a8cb362e0c37e5d815f5eb7c37f79ff93707329d5a194e42e54ca0" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "ecdsa" @@ -1952,9 +1817,9 @@ dependencies = [ "base16ct", "crypto-bigint 0.4.9", "der 0.6.1", - "digest 0.10.6", + "digest 0.10.7", "ff", - "generic-array 0.14.6", + "generic-array 0.14.7", "group", "pkcs8 0.9.0", "rand_core 0.6.4", @@ -1963,6 +1828,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.32" @@ -1985,6 +1856,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + [[package]] name = "envy" version = "0.4.2" @@ -1996,13 +1880,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] @@ -2015,6 +1899,15 @@ dependencies = [ "libc", ] +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + [[package]] name = "ethabi" version = "16.0.0" @@ -2110,8 +2003,8 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] @@ -2148,12 +2041,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -2179,9 +2072,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -2252,9 +2145,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2267,9 +2160,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2277,15 +2170,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" 
+checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2306,47 +2199,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" - -[[package]] -name = "futures-lite" -version = "1.12.0" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2360,9 +2238,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2388,9 +2266,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2409,9 +2287,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2420,9 +2298,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -2436,7 +2314,7 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ - "aho-corasick", + "aho-corasick 0.7.20", "bstr", "fnv", "log", @@ -2490,11 +2368,12 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.9.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb" dependencies = [ "async-trait", - "base64 0.21.0", + "base64 0.21.2", "google-cloud-metadata", "google-cloud-token", "home", @@ -2503,27 +2382,17 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "tokio", "tracing", "urlencoding", ] -[[package]] -name = "google-cloud-default" -version = "0.1.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" -dependencies = [ - "async-trait", - "google-cloud-auth", - "google-cloud-metadata", - "google-cloud-storage", -] - [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" dependencies = [ "reqwest", "thiserror", @@ -2532,13 +2401,16 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.10.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932" dependencies = [ "async-stream", - "base64 0.21.0", + "base64 0.21.2", "bytes 1.4.0", "futures-util", + "google-cloud-auth", + "google-cloud-metadata", "google-cloud-token", "hex", "once_cell", @@ -2551,7 +2423,7 @@ dependencies = [ "serde_json", "sha2 0.10.6", "thiserror", - "time 0.3.20", + "time 0.3.22", "tokio", "tracing", "url", @@ -2559,8 +2431,9 @@ dependencies = [ [[package]] name = "google-cloud-token" -version = "0.1.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a" dependencies = [ "async-trait", ] @@ -2572,7 +2445,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", - "futures 0.3.27", + "futures 0.3.28", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2595,9 +2468,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes 1.4.0", "fnv", @@ -2608,7 +2481,7 @@ 
dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", ] @@ -2620,9 +2493,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -2659,6 +2532,16 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "hdrhistogram" +version = "7.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" +dependencies = [ + "byteorder", + "num-traits", +] + [[package]] name = "headers" version = "0.3.8" @@ -2755,16 +2638,16 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "home" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi 0.3.9", + "windows-sys 0.48.0", ] [[package]] @@ -2800,6 +2683,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -2820,9 +2709,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes 1.4.0", "futures-channel", @@ -2844,9 +2733,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", @@ -2855,7 +2744,6 @@ dependencies = [ "rustls-native-certs", "tokio", "tokio-rustls", - "webpki-roots", ] [[package]] @@ -2891,26 +2779,25 @@ checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi 0.3.9", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = 
"f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2932,9 +2819,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2973,21 +2860,35 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", ] +[[package]] +name = "insta" +version = "1.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a28d25139df397cbca21408bb742cf6837e04cdbebf1b07b760caf971d6a972" +dependencies = [ + "console", + "lazy_static", + "linked-hash-map", + "serde", + "similar", + "yaml-rust", +] + [[package]] name = "instant" version = "0.1.12" @@ -2999,12 +2900,13 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3018,9 +2920,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "ipnetwork" @@ -3028,16 +2930,26 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" +[[package]] +name = "iri-string" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3072,9 +2984,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -3085,7 
+2997,7 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "derive_more", - "futures 0.3.27", + "futures 0.3.28", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-pubsub", "log", @@ -3099,7 +3011,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "futures-executor", "futures-util", "log", @@ -3113,7 +3025,7 @@ name = "jsonrpc-core" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "futures-executor", "futures-util", "log", @@ -3127,7 +3039,7 @@ name = "jsonrpc-core-client" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "jsonrpc-client-transports", ] @@ -3137,8 +3049,8 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3147,7 +3059,7 @@ name = "jsonrpc-http-server" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "hyper", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-server-utils", @@ -3162,7 +3074,7 @@ name = "jsonrpc-pubsub" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "lazy_static", "log", @@ -3177,7 +3089,7 @@ version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ "bytes 1.4.0", - "futures 0.3.27", + "futures 0.3.28", "globset", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "lazy_static", @@ -3193,7 +3105,7 @@ name = "jsonrpc-ws-server" version = "18.0.0" source = "git+https://github.com/matter-labs/jsonrpc.git?branch=master#12c53e3e20c09c2fb9966a4ef1b0ea63de172540" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-server-utils", "log", @@ -3204,9 +3116,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" +checksum = "1822d18e4384a5e79d94dc9e4d1239cfa9fad24e55b44d2efeff5b394c9fece4" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3221,41 +3133,36 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" +checksum = 
"11aa5766d5c430b89cb26a99b88f3245eb91534be8126102cea9e45ee3891b22" dependencies = [ - "anyhow", "futures-channel", - "futures-timer", "futures-util", "gloo-net", "http", "jsonrpsee-core", - "jsonrpsee-types", "pin-project", "rustls-native-certs", "soketto", "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] name = "jsonrpsee-core" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" +checksum = "64c6832a55f662b5a6ecc844db24b8b9c387453f923de863062c60ce33d62b81" dependencies = [ "anyhow", - "arrayvec 0.7.2", "async-lock", "async-trait", "beef", - "futures-channel", "futures-timer", "futures-util", "globset", @@ -3269,51 +3176,50 @@ dependencies = [ "soketto", "thiserror", "tokio", + "tokio-stream", "tracing", "wasm-bindgen-futures", ] [[package]] name = "jsonrpsee-http-client" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" +checksum = "1705c65069729e3dccff6fd91ee431d5d31cabcf00ce68a62a2c6435ac713af9" dependencies = [ "async-trait", "hyper", "hyper-rustls", "jsonrpsee-core", "jsonrpsee-types", - "rustc-hash", "serde", "serde_json", "thiserror", "tokio", + "tower", "tracing", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" +checksum = "c6027ac0b197ce9543097d02a290f550ce1d9432bf301524b013053c0b75cc94" dependencies = [ "heck 0.4.1", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "jsonrpsee-server" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" +checksum = "4f06661d1a6b6e5b85469dc9c29acfbb9b3bb613797a6fd10a3ebb8a70754057" dependencies = [ - "futures-channel", "futures-util", - "http", "hyper", "jsonrpsee-core", "jsonrpsee-types", @@ -3322,16 +3228,16 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower", "tracing", ] [[package]] name = "jsonrpsee-types" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" +checksum = "6e5bf6c75ce2a4217421154adfc65a24d2b46e77286e59bba5d9fa6544ccc8f4" dependencies = [ "anyhow", "beef", @@ -3343,9 +3249,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77310456f43c6c89bcba1f6b2fc2a28300da7c341f320f5128f8c83cc63232d" +checksum = "34e6ea7c6d862e60f8baebd946c037b70c6808a4e4e31e792a4029184e3ce13a" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -3354,9 +3260,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.16.2" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" +checksum = 
"a64b2589680ba1ad7863f279cd2d5083c1dc0a7c0ea959d22924553050f8ab9f" dependencies = [ "http", "jsonrpsee-client-transport", @@ -3366,11 +3272,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -3392,9 +3298,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -3410,12 +3316,13 @@ dependencies = [ ] [[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +name = "l1_tx_effective_gas_price_migration" +version = "0.1.0" dependencies = [ - "log", + "structopt", + "tokio", + "zksync_dal", + "zksync_types", ] [[package]] @@ -3447,9 +3354,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.140" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libloading" @@ -3463,9 +3370,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "librocksdb-sys" @@ -3483,9 +3390,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", @@ -3493,19 +3400,16 @@ dependencies = [ ] [[package]] -name = "link-cplusplus" -version = "1.0.8" +name = "linked-hash-map" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "loadnext" @@ -3514,7 +3418,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.27", + "futures 0.3.28", "hex", "metrics", "num 0.3.1", @@ -3559,9 +3463,9 @@ checksum = "e34f76eb3611940e0e7d53a9aaa4e6a3151f69541a282fd0dad5571420c53ff1" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -3569,13 +3473,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", - "value-bag", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "mach" @@ -3598,7 +3498,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -3641,9 +3541,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -3656,7 +3556,7 @@ checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ "ahash 0.7.6", "metrics-macros", - "portable-atomic", + "portable-atomic 0.3.20", ] [[package]] @@ -3671,7 +3571,7 @@ dependencies = [ "metrics", "metrics-util", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta 0.10.1", "thiserror", "tokio", @@ -3684,8 +3584,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3695,22 +3595,22 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", "hashbrown 0.12.3", "metrics", "num_cpus", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta 0.10.1", "sketches-ddsketch", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -3722,6 +3622,21 @@ dependencies = [ "unicase", ] +[[package]] +name = "mini-moka" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cafc5ec7807288595f9c20c86e6ce6d262b722f61e0547fe7e6e6e6451b58d5" +dependencies = [ + "crossbeam-channel 0.5.8", + "crossbeam-utils 0.8.16", + "dashmap", + "skeptic", + "smallvec", + "tagptr", + "triomphe", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3737,6 +3652,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = 
"0.6.23" @@ -3758,14 +3682,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3818,9 +3742,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.38" +version = "0.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" dependencies = [ "cfg-if 0.1.10", "libc", @@ -3914,7 +3838,6 @@ dependencies = [ "autocfg 1.1.0", "num-integer", "num-traits", - "rand 0.7.3", "serde", ] @@ -3954,7 +3877,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5" dependencies = [ "num-traits", - "rand 0.7.3", "serde", ] @@ -3984,8 +3906,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4057,18 +3979,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -4090,9 +4012,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -4105,13 +4027,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4122,11 +4044,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "pkg-config", @@ 
-4140,7 +4061,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "futures-channel", "futures-executor", "futures-util", @@ -4172,7 +4093,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.27", + "futures 0.3.28", "futures-util", "http", "opentelemetry", @@ -4197,9 +4118,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" +checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" dependencies = [ "log", "serde", @@ -4208,9 +4129,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.5.0" +version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" +checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" [[package]] name = "ouroboros" @@ -4230,8 +4151,8 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4284,7 +4205,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -4299,8 +4220,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4322,12 +4243,6 @@ dependencies = [ "url", ] -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - [[package]] name = "parking_lot" version = "0.11.2" @@ -4346,7 +4261,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -4358,22 +4273,22 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets 0.48.0", ] [[package]] @@ -4440,15 +4355,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -4456,9 +4371,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -4466,22 +4381,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -4500,22 +4415,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4564,9 +4479,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "plotters" @@ -4597,26 +4512,19 @@ dependencies = [ ] [[package]] -name = "polling" -version = "2.6.0" +name = "portable-atomic" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" dependencies = [ - "autocfg 1.1.0", - "bitflags 1.3.2", - "cfg-if 1.0.0", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.45.0", + "portable-atomic 1.3.3", ] [[package]] name = "portable-atomic" -version = "0.3.19" +version = "1.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -4626,12 +4534,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1" dependencies = [ - "proc-macro2 1.0.52", - "syn 2.0.12", + "proc-macro2 1.0.60", + "syn 2.0.18", ] [[package]] @@ -4663,7 +4571,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit 0.19.6", + "toml_edit 0.19.10", ] [[package]] @@ -4673,8 +4581,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -4685,8 +4593,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "version_check", ] @@ -4707,9 +4615,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -4763,8 +4671,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4778,13 +4686,24 @@ dependencies = [ "prost", ] +[[package]] +name = "pulldown-cmark" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +dependencies = [ + "bitflags 1.3.2", + "memchr", + "unicase", +] + [[package]] name = "quanta" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "libc", "mach", "once_cell", @@ -4800,7 +4719,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "libc", "mach", "once_cell", @@ -4821,11 +4740,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", ] 
[[package]] @@ -4950,7 +4869,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -5059,9 +4978,9 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "num_cpus", ] @@ -5083,26 +5002,36 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.3" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.2", "memchr", - "regex-syntax", + "regex-automata 0.3.2", + "regex-syntax 0.7.3", ] [[package]] @@ -5111,7 +5040,18 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +dependencies = [ + "aho-corasick 1.0.2", + "memchr", + "regex-syntax 0.7.3", ] [[package]] @@ -5120,6 +5060,12 @@ version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +[[package]] +name = "regex-syntax" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -5131,11 +5077,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes 1.4.0", "encoding_rs", "futures-core", @@ -5163,14 +5109,14 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 0.22.6", "winreg", ] @@ -5180,7 +5126,7 @@ version = "0.4.1" source = "git+https://github.com/matter-labs/rescue-poseidon#f611a3353e48cf42153e44d89ed90da9bc5934e8" dependencies = [ "addchain", - 
"arrayvec 0.7.2", + "arrayvec 0.7.3", "blake2 0.10.6", "byteorder", "franklin-crypto", @@ -5258,7 +5204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" dependencies = [ "byteorder", - "digest 0.10.6", + "digest 0.10.7", "num-bigint-dig", "num-integer", "num-iter", @@ -5273,9 +5219,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -5306,35 +5252,35 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", "ring", + "rustls-webpki", "sct", - "webpki", ] [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -5348,7 +5294,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -5396,12 +5352,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.5.0" @@ -5436,7 +5386,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", "der 0.6.1", - "generic-array 0.14.6", + "generic-array 0.14.7", "pkcs8 0.9.0", "subtle", "zeroize", @@ -5472,9 +5422,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -5485,9 +5435,9 @@ dependencies = [ [[package]] name = 
"security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -5498,6 +5448,9 @@ name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +dependencies = [ + "serde", +] [[package]] name = "send_wrapper" @@ -5507,9 +5460,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "sentry" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" +checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" dependencies = [ "httpdate", "native-tls", @@ -5519,15 +5472,16 @@ dependencies = [ "sentry-core", "sentry-debug-images", "sentry-panic", + "sentry-tracing", "tokio", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" +checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" dependencies = [ "backtrace", "once_cell", @@ -5537,9 +5491,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" +checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" dependencies = [ "hostname", "libc", @@ -5551,9 +5505,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" +checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" dependencies = [ "once_cell", "rand 0.8.5", @@ -5564,9 +5518,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" dependencies = [ "findshlibs", "once_cell", @@ -5575,56 +5529,68 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.30.0" +version = "0.31.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" +dependencies = [ + "sentry-backtrace", + "sentry-core", +] + +[[package]] +name = "sentry-tracing" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" +checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" dependencies = [ "sentry-backtrace", "sentry-core", + "tracing-core", + "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" +checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" dependencies = [ "debugid", - "getrandom 0.2.8", + "getrandom 0.2.10", "hex", "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa 1.0.6", @@ -5662,8 +5628,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -5700,7 +5666,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -5724,7 +5690,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -5745,7 +5711,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -5764,16 +5730,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" -[[package]] -name = "signal-hook" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -5789,12 +5745,18 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] [[package]] -name = "simple_asn1" +name = "similar" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" + +[[package]] +name = "simple_asn1" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" @@ -5802,14 +5764,29 
@@ dependencies = [ "num-bigint 0.4.3", "num-traits", "thiserror", - "time 0.3.20", + "time 0.3.22", +] + +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", ] [[package]] name = "sketches-ddsketch" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" @@ -5844,7 +5821,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes 1.4.0", - "futures 0.3.27", + "futures 0.3.28", "http", "httparse", "log", @@ -5920,9 +5897,9 @@ dependencies = [ "bytes 1.4.0", "chrono", "crc", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "dirs", "either", "futures-channel", @@ -5953,6 +5930,7 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", + "tokio-stream", "url", "whoami", ] @@ -5968,8 +5946,8 @@ dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "serde_json", "sha2 0.9.9", @@ -5985,9 +5963,10 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" dependencies = [ - "async-native-tls", - "async-std", "native-tls", + "once_cell", + "tokio", + "tokio-native-tls", ] [[package]] @@ -6037,8 +6016,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -6058,8 +6037,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -6087,28 +6066,28 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "sync_vm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +version = "1.3.3" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "cs_derive", "derivative", "franklin-crypto", @@ -6134,20 +6113,22 @@ name = 
"system-constants-generator" version = "0.1.0" dependencies = [ "codegen 0.2.0", - "num 0.3.1", "once_cell", - "rand 0.7.3", "serde", "serde_json", - "tempfile", "vm", "zksync_contracts", "zksync_state", - "zksync_storage", "zksync_types", "zksync_utils", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -6166,15 +6147,16 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg 1.1.0", "cfg-if 1.0.0", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -6188,12 +6170,12 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" +checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -6214,22 +6196,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -6254,9 +6236,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa 1.0.6", "serde", @@ -6266,15 +6248,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -6324,21 +6306,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.27.0" +version = "1.28.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", "libc", - "mio 0.8.6", + "mio 0.8.8", "num_cpus", "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6353,13 +6335,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 2.0.12", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -6374,20 +6356,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", - "webpki", ] [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -6410,9 +6391,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes 1.4.0", "futures-core", @@ -6434,9 +6415,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" @@ -6451,9 +6432,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.6" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "toml_datetime", @@ -6497,9 +6478,9 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", "prost-build", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -6511,16 +6492,47 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "hdrhistogram", "indexmap", "pin-project", "pin-project-lite", "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" +dependencies = [ + "async-compression", + "base64 0.20.0", + "bitflags 2.3.2", + "bytes 1.4.0", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "httpdate", + "iri-string", + "mime", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "tokio", + "tokio-util 0.7.8", + "tower", "tower-layer", "tower-service", "tracing", + "uuid", ] [[package]] @@ -6550,20 +6562,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -6616,9 +6628,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -6629,13 +6641,19 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.20", + "time 0.3.22", "tracing", "tracing-core", "tracing-log", "tracing-serde", ] +[[package]] +name = "triomphe" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" + [[package]] name = "try-lock" version = "0.2.4" @@ -6686,15 +6704,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -6737,11 +6755,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "native-tls", "once_cell", @@ -6750,12 +6768,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", "serde", ] @@ -6774,11 +6792,11 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "serde", ] @@ -6788,16 +6806,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] - [[package]] name = "vcpkg" version = "0.2.15" @@ -6835,23 +6843,21 @@ dependencies = [ name = "vm" version = "0.1.0" dependencies = [ + "anyhow", + "ethabi", "hex", "itertools", "metrics", "once_cell", - "serde", - "serde_json", - "tempfile", "thiserror", - "tracing", + "tokio", "vlog", "zk_evm", "zkevm-assembly", "zksync_config", "zksync_contracts", - "zksync_crypto", + "zksync_eth_signer", "zksync_state", - "zksync_storage", "zksync_types", "zksync_utils", ] @@ -6883,30 +6889,22 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi 0.3.9", "winapi-util", ] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -6930,9 +6928,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -6940,24 +6938,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name 
= "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -6967,32 +6965,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" @@ -7009,9 +7007,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -7023,13 +7021,13 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "base64 0.13.1", "bytes 1.4.0", "derive_more", "ethabi", "ethereum-types", - "futures 0.3.27", + "futures 0.3.28", "futures-timer", "headers", "hex", @@ -7067,6 +7065,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki", +] + [[package]] name = "which" version = "4.4.0" @@ -7131,6 +7138,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-sys" version = "0.42.0" @@ -7280,9 +7296,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = 
"ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -7312,31 +7328,53 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zk_evm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.3#9a1eaa98acb9e3280dbbde5b132cbf64e15fe96e" dependencies = [ + "anyhow", "lazy_static", "num 0.4.0", "serde", "serde_json", "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs", +] + +[[package]] +name = "zk_evm_abstractions" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zk_evm_abstractions.git#839721a4ae2093c5c0aa8ffd49758f32ecd172ed" +dependencies = [ + "anyhow", + "serde", + "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" version = "1.3.2" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#a276422b9f874242eeaeecd1434542565064f85e" dependencies = [ - "env_logger", + "env_logger 0.9.3", "hex", "lazy_static", "log", @@ -7356,7 +7394,7 @@ name = "zkevm_opcode_defs" version = "1.3.2" source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags 2.2.1", + "bitflags 2.3.2", "blake2 0.10.6", "ethereum-types", "k256", @@ -7367,15 +7405,15 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.3#363ead7afaac72bd3006c49d501934747781cbb4" dependencies = [ "bincode", "circuit_testing", "codegen 0.2.0", "crossbeam 0.8.2", "derivative", - "env_logger", + "env_logger 0.10.0", "hex", "num-bigint 0.4.3", "num-integer", @@ -7426,7 +7464,7 @@ dependencies = [ "async-trait", "backon", "convert_case 0.6.0", - "futures 0.3.27", + "futures 0.3.28", "hex", "serde", "serde_json", @@ -7464,7 +7502,7 @@ dependencies = [ "chrono", "ctrlc", "ethabi", - "futures 0.3.27", + "futures 0.3.28", "hex", "lazy_static", "metrics", @@ -7482,6 +7520,7 @@ dependencies = [ "zksync_dal", "zksync_queued_job_processor", "zksync_types", + "zksync_utils", ] [[package]] @@ -7507,15 +7546,15 @@ dependencies = [ "assert_matches", "async-trait", "bigdecimal", - "bincode", "bitflags 1.3.2", "chrono", - "clap 4.2.4", + "clap 4.3.4", "ctrlc", "db_test_macro", - "futures 0.3.27", + "futures 0.3.28", "governor", "hex", + "hyper", "itertools", "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", "jsonrpc-core-client", @@ -7534,6 
+7573,8 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "tower", + "tower-http", "tracing", "vlog", "vm", @@ -7541,13 +7582,11 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_dal", - "zksync_db_storage_provider", "zksync_eth_client", "zksync_eth_signer", "zksync_health_check", "zksync_mempool", "zksync_merkle_tree", - "zksync_merkle_tree2", "zksync_mini_merkle_tree", "zksync_object_store", "zksync_prover_utils", @@ -7581,7 +7620,6 @@ name = "zksync_dal" version = "1.0.0" dependencies = [ "anyhow", - "async-std", "bigdecimal", "bincode", "db_test_macro", @@ -7592,26 +7630,15 @@ dependencies = [ "once_cell", "serde_json", "sqlx", + "strum", "thiserror", "tokio", "vlog", - "vm", "zksync_config", "zksync_contracts", "zksync_health_check", - "zksync_state", - "zksync_storage", "zksync_types", "zksync_utils", - "zksync_web3_decl", -] - -[[package]] -name = "zksync_db_storage_provider" -version = "1.0.0" -dependencies = [ - "zksync_dal", - "zksync_types", ] [[package]] @@ -7641,7 +7668,7 @@ dependencies = [ "actix-rt", "actix-web", "async-trait", - "futures 0.3.27", + "futures 0.3.28", "hex", "jsonrpc-core 18.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto", @@ -7661,58 +7688,49 @@ name = "zksync_external_node" version = "1.0.0" dependencies = [ "anyhow", + "envy", + "futures 0.3.28", "prometheus_exporter", + "serde", "tokio", + "url", "vlog", "zksync_basic_types", "zksync_config", + "zksync_contracts", "zksync_core", "zksync_dal", - "zksync_eth_client", "zksync_health_check", + "zksync_state", "zksync_storage", + "zksync_types", + "zksync_utils", + "zksync_web3_decl", ] [[package]] name = "zksync_health_check" version = "0.1.0" - -[[package]] -name = "zksync_mempool" -version = "1.0.0" dependencies = [ - "metrics", - "vlog", - "zksync_types", + "async-trait", ] [[package]] -name = "zksync_merkle_tree" +name = "zksync_mempool" version = "1.0.0" dependencies = [ - "bincode", - "byteorder", - "itertools", "metrics", - "once_cell", - "rayon", - "serde", - "tempfile", - "thiserror", "vlog", - "zksync_config", - "zksync_crypto", - "zksync_storage", "zksync_types", - "zksync_utils", ] [[package]] -name = "zksync_merkle_tree2" +name = "zksync_merkle_tree" version = "1.0.0" dependencies = [ "assert_matches", - "clap 4.2.4", + "clap 4.3.4", + "insta", "leb128", "metrics", "once_cell", @@ -7723,19 +7741,19 @@ dependencies = [ "serde_with", "tempfile", "thiserror", + "vlog", "zksync_config", "zksync_crypto", "zksync_storage", "zksync_types", - "zksync_utils", ] [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" dependencies = [ + "criterion", "once_cell", - "rayon", "zksync_basic_types", "zksync_crypto", ] @@ -7744,9 +7762,9 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "async-trait", "bincode", "google-cloud-auth", - "google-cloud-default", "google-cloud-storage", "http", "metrics", @@ -7762,7 +7780,7 @@ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ "ctrlc", - "futures 0.3.27", + "futures 0.3.28", "metrics", "regex", "reqwest", @@ -7787,70 +7805,53 @@ dependencies = [ name = "zksync_state" version = "1.0.0" dependencies = [ + "db_test_macro", "metrics", + "mini-moka", "tempfile", + "tokio", "vlog", + "zksync_dal", "zksync_storage", "zksync_types", - "zksync_utils", ] [[package]] name = "zksync_storage" version = "1.0.0" dependencies = [ - "bincode", - "byteorder", + "metrics", "num_cpus", - "once_cell", "rocksdb", - "serde", + "tempfile", "vlog", - "zksync_types", - "zksync_utils", ] 
[[package]] -name = "zksync_test_account" -version = "1.0.0" -dependencies = [ - "num 0.3.1", - "zksync_basic_types", - "zksync_crypto", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_testkit" +name = "zksync_test_node" version = "1.0.0" dependencies = [ "anyhow", - "futures 0.3.27", - "num 0.3.1", + "bigdecimal", + "clap 4.3.4", + "futures 0.3.28", + "jsonrpc-core 18.0.0 (git+https://github.com/matter-labs/jsonrpc.git?branch=master)", + "jsonrpc-http-server", "once_cell", - "rand 0.7.3", + "reqwest", "serde", - "serde_json", - "structopt", - "tempfile", "tokio", + "tracing", + "tracing-subscriber", "vlog", "vm", - "zksync_config", + "zksync_basic_types", "zksync_contracts", "zksync_core", - "zksync_crypto", - "zksync_dal", - "zksync_eth_client", - "zksync_eth_signer", - "zksync_mempool", - "zksync_prover_utils", "zksync_state", - "zksync_storage", - "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_web3_decl", ] [[package]] @@ -7861,13 +7862,11 @@ dependencies = [ "blake2 0.10.6", "chrono", "codegen 0.1.0", - "ethbloom", "hex", "metrics", "num 0.3.1", "once_cell", "parity-crypto", - "rayon", "rlp", "secp256k1 0.21.3", "serde", @@ -7875,7 +7874,6 @@ dependencies = [ "serde_with", "strum", "thiserror", - "tiny-keccak 1.5.0", "tokio", "zk_evm", "zkevm-assembly", @@ -7894,9 +7892,10 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - "futures 0.3.27", + "futures 0.3.28", "hex", "itertools", + "metrics", "num 0.3.1", "reqwest", "serde", @@ -7940,31 +7939,12 @@ dependencies = [ ] [[package]] -name = "zksync_witness_generator" -version = "1.0.0" +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "async-trait", - "bincode", - "const-decoder", - "futures 0.3.27", - "metrics", - "prometheus_exporter", - "rand 0.8.5", - "serde", - "structopt", - "tokio", - "vlog", - "vm", - "zksync_config", - "zksync_dal", - "zksync_db_storage_provider", - "zksync_object_store", - "zksync_prover_utils", - "zksync_queued_job_processor", - "zksync_state", - "zksync_types", - "zksync_utils", - "zksync_verification_key_generator_and_server", + "zstd-safe 5.0.2+zstd.1.5.2", ] [[package]] @@ -7973,14 +7953,24 @@ version = "0.12.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" dependencies = [ - "zstd-safe", + "zstd-safe 6.0.5+zstd.1.5.4", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", ] [[package]] name = "zstd-safe" -version = "6.0.4+zstd.1.5.4" +version = "6.0.5+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543" +checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" dependencies = [ "libc", "zstd-sys", @@ -7988,9 +7978,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.7+zstd.1.5.4" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", diff --git 
a/Cargo.toml b/Cargo.toml index 5bb5ea268596..a82c4f8a1281 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "core/bin/external_node", "core/bin/admin-tools", "core/bin/system-constants-generator", + "core/bin/test_node", "core/tests/cross_external_nodes_checker", # Contract verifier "core/bin/contract-verifier", @@ -16,9 +17,10 @@ members = [ # "core/bin/setup_key_generator_and_server", # Verification key generator and server "core/bin/verification_key_generator_and_server", - "core/bin/witness_generator", + #"core/bin/witness_generator", # circuit synthesizer: it's commented out as it cannot be built with stable rust. # "core/bin/circuit_synthesizer", + "core/bin/l1_tx_effective_gas_price_migration", # Libraries "core/lib/basic_types", "core/lib/config", @@ -31,7 +33,6 @@ members = [ "core/lib/eth_signer", "core/lib/mempool", "core/lib/merkle_tree", - "core/lib/merkle_tree2", "core/lib/object_store", "core/lib/mini_merkle_tree", "core/lib/prometheus_exporter", @@ -44,11 +45,9 @@ members = [ "core/lib/vlog", "core/lib/vm", "core/lib/web3_decl", - "core/lib/db_storage_provider", # Test infrastructure "core/tests/loadnext", - "core/tests/testkit", "core/tests/vm-benchmark", "core/tests/vm-benchmark/harness", @@ -57,10 +56,7 @@ members = [ ] resolver = "2" -exclude = [ "core/bin/prover", "core/bin/circuit_synthesizer", "core/bin/setup_key_generator_and_server"] - -[profile.test.package.zksync_merkle_tree] -opt-level = 3 +exclude = [ "core/bin/prover", "core/bin/circuit_synthesizer", "core/bin/setup_key_generator_and_server", "core/bin/witness_generator", "core/bin/vk_setup_data_generator_server_fri", "core/bin/prover_fri"] # for `perf` profiling [profile.perf] diff --git a/README.md b/README.md index 988df01a373b..d6e2e81b6837 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ at your option. 
- [GitHub](https://github.com/matter-labs) - [Twitter](https://twitter.com/zksync) - [Twitter for Devs](https://twitter.com/zkSyncDevs) -- [Discord](https://join.zksync.dev) +- [Discord](https://discord.gg/nMaPGrDDwk) ## Disclaimer diff --git a/bors.toml b/bors.toml index 683bf268844c..8513110ce503 100644 --- a/bors.toml +++ b/bors.toml @@ -7,9 +7,15 @@ status = [ "integration", "loadtest", "lint", - "testkit", "unit-tests", - "Build images / Build and Push Docker Images", + "Build images / Build and Push Docker Images (circuit-synthesizer)", + "Build images / Build and Push Docker Images (contract-verifier)", + "Build images / Build and Push Docker Images (external-node)", + "Build images / Build and Push Docker Images (prover-fri)", + "Build images / Build and Push Docker Images (prover-v2)", + "Build images / Build and Push Docker Images (server-v2)", + "Build images / Build and Push Docker Images (witness-generator)", + "Build images / Build and Push Docker Images (cross-external-nodes-checker)", ] use_squash_merge = true diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 1ff1c3e455f4..74b7016ab19f 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,393 @@ # Changelog +## [5.19.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.19.0...v5.19.1) (2023-07-13) + + +### Bug Fixes + +* **crypto:** update circuits, VK to fix sha256 ([#2172](https://github.com/matter-labs/zksync-2-dev/issues/2172)) ([3e56d26](https://github.com/matter-labs/zksync-2-dev/commit/3e56d26c6007b0cabeb7b5af712232df99d8dc12)) +* **healthcheck:** Don't panic if healthcheck stop channel is dropped ([#2174](https://github.com/matter-labs/zksync-2-dev/issues/2174)) ([51588ba](https://github.com/matter-labs/zksync-2-dev/commit/51588bacd60975eb697d2b9cb27e922666331cd4)) + +## [5.19.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.18.1...v5.19.0) (2023-07-13) + + +### Features + +* **api:** Expose metrics on SQL connections and number of requests in flight ([#2169](https://github.com/matter-labs/zksync-2-dev/issues/2169)) ([7cda24b](https://github.com/matter-labs/zksync-2-dev/commit/7cda24b858dbf79b84be09cacbd5a56a1663f592)) + +## [5.18.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.18.0...v5.18.1) (2023-07-12) + + +### Bug Fixes + +* **house-keeper:** rename server to prover_fri while emitting queued jobs metrics ([#2162](https://github.com/matter-labs/zksync-2-dev/issues/2162)) ([599eb7c](https://github.com/matter-labs/zksync-2-dev/commit/599eb7c187d7a6833d6bc7f7c539f7bfb2b9dc38)) + +## [5.18.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.17.0...v5.18.0) (2023-07-11) + + +### Features + +* **house-keeper:** emit FRI prover jobs stats ([#2152](https://github.com/matter-labs/zksync-2-dev/issues/2152)) ([1fa413b](https://github.com/matter-labs/zksync-2-dev/commit/1fa413b656f967437008996084c2429b78e08c97)) +* **witness-gen-fri:** Save aux_output_witness in public GCS bucket ([#2160](https://github.com/matter-labs/zksync-2-dev/issues/2160)) ([848e8de](https://github.com/matter-labs/zksync-2-dev/commit/848e8ded0ca1806f6404f3bfeaabdf19b5a3c840)) + +## [5.17.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.16.1...v5.17.0) (2023-07-11) + + +### Features + +* **api:** Allow to disable VM limiter ([#2158](https://github.com/matter-labs/zksync-2-dev/issues/2158)) ([2c950c0](https://github.com/matter-labs/zksync-2-dev/commit/2c950c0729b945aced1769a4f88e46de4ca9c68d)) +* **db:** cache smart contract code queries 
([#1988](https://github.com/matter-labs/zksync-2-dev/issues/1988)) ([fb331f5](https://github.com/matter-labs/zksync-2-dev/commit/fb331f529527a721c35a952444f90a110b1d2c79)) + + +### Bug Fixes + +* Rewrite duration metrics for Aggregation stage latency ([#2124](https://github.com/matter-labs/zksync-2-dev/issues/2124)) ([7e50d31](https://github.com/matter-labs/zksync-2-dev/commit/7e50d31217d86c15268232789c795757517e967f)) + +## [5.16.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.16.0...v5.16.1) (2023-07-10) + + +### Bug Fixes + +* **api:** Emit less logs ([#2144](https://github.com/matter-labs/zksync-2-dev/issues/2144)) ([51d7748](https://github.com/matter-labs/zksync-2-dev/commit/51d7748439f964c013e1b0124b52b03b871989c0)) + +## [5.16.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.15.0...v5.16.0) (2023-07-10) + + +### Features + +* **api:** Different config values for HTTP/WS server threads amount ([#2141](https://github.com/matter-labs/zksync-2-dev/issues/2141)) ([fc245f7](https://github.com/matter-labs/zksync-2-dev/commit/fc245f701a37d8b8e254183727005063c3275fb4)) + + +### Performance Improvements + +* **api:** Remove blocking code from API ([#2131](https://github.com/matter-labs/zksync-2-dev/issues/2131)) ([ca83489](https://github.com/matter-labs/zksync-2-dev/commit/ca83489d83f7ad0adbfdd50db21a52edd7c7fbc2)) + +## [5.15.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.14.2...v5.15.0) (2023-07-10) + + +### Features + +* **witness-gen-fri:** save BlockAuxilaryOutputWitness in GCS in case it's needed for debugging ([#2137](https://github.com/matter-labs/zksync-2-dev/issues/2137)) ([fdc6127](https://github.com/matter-labs/zksync-2-dev/commit/fdc612735e2a54ce84645828de8473fa1cfd0895)) + +## [5.14.2](https://github.com/matter-labs/zksync-2-dev/compare/v5.14.1...v5.14.2) (2023-07-09) + + +### Bug Fixes + +* **house-keeper:** make prover db pool size configurable ([#2138](https://github.com/matter-labs/zksync-2-dev/issues/2138)) ([12d101c](https://github.com/matter-labs/zksync-2-dev/commit/12d101cc469504b0ce58b2d583d8f8373f5773ff)) + +## [5.14.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.14.0...v5.14.1) (2023-07-07) + + +### Bug Fixes + +* **crypto:** update harness to use log_tracing to suppress println's from boojum ([#2134](https://github.com/matter-labs/zksync-2-dev/issues/2134)) ([b0655ba](https://github.com/matter-labs/zksync-2-dev/commit/b0655ba4e8bba5264c59cff83008af7390ed963f)) + +## [5.14.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.13.1...v5.14.0) (2023-07-07) + + +### Features + +* **prover-fri:** add metrics for FRI prover and witness-gen ([#2128](https://github.com/matter-labs/zksync-2-dev/issues/2128)) ([5cea755](https://github.com/matter-labs/zksync-2-dev/commit/5cea755285e75f40cff1412a100508aa34c68922)) + + +### Bug Fixes + +* **sdk:** Fix getting receipt for transactions rejected in statekeeper ([#2071](https://github.com/matter-labs/zksync-2-dev/issues/2071)) ([c97e494](https://github.com/matter-labs/zksync-2-dev/commit/c97e494c1ef7f58fe8632a3ebf943d775b1703cb)) + +## [5.13.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.13.0...v5.13.1) (2023-07-06) + + +### Bug Fixes + +* **fri-witness-generator:** update harness and use different vk for node at diff depth ([#2116](https://github.com/matter-labs/zksync-2-dev/issues/2116)) ([82fd38c](https://github.com/matter-labs/zksync-2-dev/commit/82fd38c3e6bd62f9ac4785d732dc01099b73d972)) +* **healthcheck:** Do not kill health check 
([#2115](https://github.com/matter-labs/zksync-2-dev/issues/2115)) ([aec1792](https://github.com/matter-labs/zksync-2-dev/commit/aec1792e84e3c91eeef619d0dfa3f66c2323828b)) +* **object_store:** switch to using published version for gcs ([#2118](https://github.com/matter-labs/zksync-2-dev/issues/2118)) ([c779569](https://github.com/matter-labs/zksync-2-dev/commit/c779569af18911f1a2f2ef3d2c8c628e37d4038d)) + + +### Performance Improvements + +* **state-keeper:** Make `BatchExecutor` async-aware ([#2109](https://github.com/matter-labs/zksync-2-dev/issues/2109)) ([cc992b8](https://github.com/matter-labs/zksync-2-dev/commit/cc992b80adbcf02e6a68228a9531a777d00bca47)) + +## [5.13.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.12.1...v5.13.0) (2023-07-05) + + +### Features + +* Add metrics for tracking eth_tx's stage transition duration PLA-146 ([#2084](https://github.com/matter-labs/zksync-2-dev/issues/2084)) ([4c29be3](https://github.com/matter-labs/zksync-2-dev/commit/4c29be30618ded958c961d7473632d1f8f5efa26)) +* **api:** Fix api health check ([#2108](https://github.com/matter-labs/zksync-2-dev/issues/2108)) ([406d6ba](https://github.com/matter-labs/zksync-2-dev/commit/406d6ba4c6c588304d74baacf9b3d66deb82e60a)) +* **api:** Use dedicated tokio runtime for VM in API ([#2111](https://github.com/matter-labs/zksync-2-dev/issues/2111)) ([e088b8b](https://github.com/matter-labs/zksync-2-dev/commit/e088b8b6f6de1da63fe000325bb4a7faddbdf862)) +* **house-keeper:** emit separate metrics for FRI witness-gen jobs in house-keeper ([#2112](https://github.com/matter-labs/zksync-2-dev/issues/2112)) ([fd616de](https://github.com/matter-labs/zksync-2-dev/commit/fd616defbb6380a876faeda33a0901dd9e4b9f57)) +* **prover-fri:** save scheduler proofs in public bucket as well ([#2101](https://github.com/matter-labs/zksync-2-dev/issues/2101)) ([8979649](https://github.com/matter-labs/zksync-2-dev/commit/897964911e7ba610722d82ae0182e60973736794)) +* **state-keeper:** Log miniblock sealing ([#2105](https://github.com/matter-labs/zksync-2-dev/issues/2105)) ([fd6e8b4](https://github.com/matter-labs/zksync-2-dev/commit/fd6e8b4b6a03ba0071645233c7a2ad2e7d3e9f5c)) + + +### Bug Fixes + +* **house-keeper:** enable GCS blob cleaner ([#2103](https://github.com/matter-labs/zksync-2-dev/issues/2103)) ([bd79319](https://github.com/matter-labs/zksync-2-dev/commit/bd79319cb24d00e76407027aa9f83b395f685cb0)) +* **witness-gen-fri:** update harness+zk_evm to fix witness gen and proof gen failure ([#2113](https://github.com/matter-labs/zksync-2-dev/issues/2113)) ([d445325](https://github.com/matter-labs/zksync-2-dev/commit/d445325cb7f70ffbfa2a3555ccc0f674e8810ee6)) + +## [5.12.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.12.0...v5.12.1) (2023-07-04) + + +### Bug Fixes + +* **api:** Gracefully shut down web3 api ([#2075](https://github.com/matter-labs/zksync-2-dev/issues/2075)) ([bd45e57](https://github.com/matter-labs/zksync-2-dev/commit/bd45e574d11e137924e4be5ecc6ae10a5d0f465b)) +* **external node:** Remove SK config from EN's TxSender ([#2093](https://github.com/matter-labs/zksync-2-dev/issues/2093)) ([aa04eaf](https://github.com/matter-labs/zksync-2-dev/commit/aa04eaf0f3b795b32dc1d6e25725a8ac7257ef99)) +* **witness-gen:** update harness to fix FRI node agg witness-gen error ([#2104](https://github.com/matter-labs/zksync-2-dev/issues/2104)) ([eb68c5a](https://github.com/matter-labs/zksync-2-dev/commit/eb68c5a47d4674aa43edfafca161526c64bd912a)) + +## 
[5.12.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.11.0...v5.12.0) (2023-07-04) + + +### Features + +* **contract-verifier:** add new zkvyper binaries and enable test ([#2096](https://github.com/matter-labs/zksync-2-dev/issues/2096)) ([308259e](https://github.com/matter-labs/zksync-2-dev/commit/308259e2f063e3a9fcf032372427be13344ed227)) + + +### Bug Fixes + +* **init:** Run gas adjuster only if necessary ([#2081](https://github.com/matter-labs/zksync-2-dev/issues/2081)) ([2ea9560](https://github.com/matter-labs/zksync-2-dev/commit/2ea95601fe433db759cc067e062d7e3b9c346a16)) + +## [5.11.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.10.1...v5.11.0) (2023-07-04) + + +### Features + +* **api:** add `gas_per_pubdata` to `zks_getTransactionDetails` ([#2085](https://github.com/matter-labs/zksync-2-dev/issues/2085)) ([dd91bb6](https://github.com/matter-labs/zksync-2-dev/commit/dd91bb673b29a17cea91e12ec95f53deba556798)) + + +### Bug Fixes + +* **prover-fri:** update harness+circuits+boojum to fix proof failures ([#2094](https://github.com/matter-labs/zksync-2-dev/issues/2094)) ([e70c6f5](https://github.com/matter-labs/zksync-2-dev/commit/e70c6f5f08093a45a6958c80128518a431c63082)) + +## [5.10.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.10.0...v5.10.1) (2023-07-03) + + +### Bug Fixes + +* **witness-gen-fri:** pass server db url while processing to witness-gen ([#2091](https://github.com/matter-labs/zksync-2-dev/issues/2091)) ([b904ffb](https://github.com/matter-labs/zksync-2-dev/commit/b904ffb0e51add2e6e9ed80244bd5ca51f988ada)) + +## [5.10.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.9.0...v5.10.0) (2023-07-03) + + +### Features + +* **api:** blockHash support in eth_getLogs ([#2072](https://github.com/matter-labs/zksync-2-dev/issues/2072)) ([4110bc0](https://github.com/matter-labs/zksync-2-dev/commit/4110bc0ef6085578770bad68f23990546f9fe8a9)) +* **object store:** Make object store and GCS async ([#2050](https://github.com/matter-labs/zksync-2-dev/issues/2050)) ([266ee68](https://github.com/matter-labs/zksync-2-dev/commit/266ee68639cafcf198c0d19c2cdbcb07108ff0de)) + + +### Bug Fixes + +* **db:** add FOR UPDATE to query ([#2086](https://github.com/matter-labs/zksync-2-dev/issues/2086)) ([4f42cdb](https://github.com/matter-labs/zksync-2-dev/commit/4f42cdbddde46ee8f7ac3404b98d5384bf2ff3ec)) +* set effective_gas_price for priority txs ([#2078](https://github.com/matter-labs/zksync-2-dev/issues/2078)) ([2bcdd52](https://github.com/matter-labs/zksync-2-dev/commit/2bcdd521e64fc5029acf7313232e821847670674)) +* **witness-generator-fri:** pass prover DB variant as well to FRI witness-gen ([#2090](https://github.com/matter-labs/zksync-2-dev/issues/2090)) ([98b2743](https://github.com/matter-labs/zksync-2-dev/commit/98b274372e376e5e0630ad1dffc3269000927442)) + + +### Performance Improvements + +* **state-keeper:** Seal miniblocks in parallel to their execution ([#2026](https://github.com/matter-labs/zksync-2-dev/issues/2026)) ([4f4ba82](https://github.com/matter-labs/zksync-2-dev/commit/4f4ba823f0954f3cac46b1956a0eda3c3de274d9)) + +## [5.9.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.8.0...v5.9.0) (2023-07-01) + + +### Features + +* **prover-fri:** move storing proofs away from DB to GCS ([#2070](https://github.com/matter-labs/zksync-2-dev/issues/2070)) ([4f97d3d](https://github.com/matter-labs/zksync-2-dev/commit/4f97d3de7d99b180fc5c1fc647be2a1367d0919d)) +* **witness-gen:** split witness-gen config for FRI and old 
([#2073](https://github.com/matter-labs/zksync-2-dev/issues/2073)) ([5903ca0](https://github.com/matter-labs/zksync-2-dev/commit/5903ca0c185bf38df743f614c83912926b0931e4)) + + +### Bug Fixes + +* **crypto:** update VK's from FRI prover ([#2074](https://github.com/matter-labs/zksync-2-dev/issues/2074)) ([833f57f](https://github.com/matter-labs/zksync-2-dev/commit/833f57f2fc9505ced4964cf00b7dc057c74928ae)) +* **witness-gen:** update harness to fix FRI main VM proving ([#2080](https://github.com/matter-labs/zksync-2-dev/issues/2080)) ([edbad6b](https://github.com/matter-labs/zksync-2-dev/commit/edbad6b840231f78ad02542dce5be4ce1e7c1c91)) + +## [5.8.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.7.0...v5.8.0) (2023-06-30) + + +### Features + +* **contract-verifier:** implement vyper contracts verification ([#2059](https://github.com/matter-labs/zksync-2-dev/issues/2059)) ([6535506](https://github.com/matter-labs/zksync-2-dev/commit/65355065ec84ee4236eea1d48db9b929ad40bf24)) +* **fri-prover:** added proof verification based on config ([#2063](https://github.com/matter-labs/zksync-2-dev/issues/2063)) ([78aab56](https://github.com/matter-labs/zksync-2-dev/commit/78aab56ab8153b0f7fe7f6fc74a52c1f5bba7601)) +* **witness-gen:** add # of dependent jobs in node agg ([#2066](https://github.com/matter-labs/zksync-2-dev/issues/2066)) ([5f4f780](https://github.com/matter-labs/zksync-2-dev/commit/5f4f780d3399282491144ea8d2efbaba0904fc7a)) + + +### Bug Fixes + +* **dal:** add indices for new provers related table ([#2068](https://github.com/matter-labs/zksync-2-dev/issues/2068)) ([2aeb3be](https://github.com/matter-labs/zksync-2-dev/commit/2aeb3be478bda00dd01547dda3364436c1417f50)) +* stage tests ([#2058](https://github.com/matter-labs/zksync-2-dev/issues/2058)) ([707cfb5](https://github.com/matter-labs/zksync-2-dev/commit/707cfb57858ee590a40e36ce89124709836f99f8)) +* **witness-gen:** update harness to fix proof gen failure for fri pro… ([#2064](https://github.com/matter-labs/zksync-2-dev/issues/2064)) ([d9f7e88](https://github.com/matter-labs/zksync-2-dev/commit/d9f7e88be2650fc9c29f45829222758d086c356f)) + +## [5.7.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.6.0...v5.7.0) (2023-06-29) + + +### Features + +* **contract-verifier:** add zksolc v1.3.12 ([#2060](https://github.com/matter-labs/zksync-2-dev/issues/2060)) ([b379af9](https://github.com/matter-labs/zksync-2-dev/commit/b379af9d1b8435ec5ac0069c56e054ed4114de00)) + +## [5.6.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.5.1...v5.6.0) (2023-06-29) + + +### Features + +* (DONT MERGE!) 
Integrate WETH bridge into server & SDK ([#1929](https://github.com/matter-labs/zksync-2-dev/issues/1929)) ([b3caf1e](https://github.com/matter-labs/zksync-2-dev/commit/b3caf1e35718c742e8d1d59427855df3b9109300)) +* add tx_index_in_l1_batch field to L2ToL1Log ([#2032](https://github.com/matter-labs/zksync-2-dev/issues/2032)) ([3ce5779](https://github.com/matter-labs/zksync-2-dev/commit/3ce5779f500d5738c92e09eff13d553e20625055)) +* Classify crypto alerts and monitor them ([#1895](https://github.com/matter-labs/zksync-2-dev/issues/1895)) ([e05fb64](https://github.com/matter-labs/zksync-2-dev/commit/e05fb642c03acd07ad800735648c00eea20d90da)) +* **contract-verifier:** vyper contract verification ([#2041](https://github.com/matter-labs/zksync-2-dev/issues/2041)) ([f22d3ec](https://github.com/matter-labs/zksync-2-dev/commit/f22d3ecd272041185958b1d79e13fafafb191cdb)) +* **external node:** Config fixups ([#2037](https://github.com/matter-labs/zksync-2-dev/issues/2037)) ([fe050e4](https://github.com/matter-labs/zksync-2-dev/commit/fe050e415e15fa090a81ffa21c11f8d926c3e964)) +* **house-keeper:** added scheduler dependency tracker and queuer ([#2045](https://github.com/matter-labs/zksync-2-dev/issues/2045)) ([ca23434](https://github.com/matter-labs/zksync-2-dev/commit/ca23434532d97506480b25d22f3a016c42232de1)) +* **house-keeper:** move FRI witness-gen leaf jobs to queued when ready ([#2020](https://github.com/matter-labs/zksync-2-dev/issues/2020)) ([f1c2287](https://github.com/matter-labs/zksync-2-dev/commit/f1c2287ab0edaeb8b96d264f98cab86333d86439)) +* **house-keeper:** re-queue stuck FRI prover & witness-gen jobs ([#2047](https://github.com/matter-labs/zksync-2-dev/issues/2047)) ([4d38ff9](https://github.com/matter-labs/zksync-2-dev/commit/4d38ff949c9a0a71c1439db14bb9e24eda980fbd)) +* **housekeeper:** Move node jobs from waiting to queued ([#2042](https://github.com/matter-labs/zksync-2-dev/issues/2042)) ([03bee75](https://github.com/matter-labs/zksync-2-dev/commit/03bee7514ce55119ea84184181b5056f767616aa)) +* **prover-fri:** add is_node_final_proof for scheduler proving ([#2054](https://github.com/matter-labs/zksync-2-dev/issues/2054)) ([57a8686](https://github.com/matter-labs/zksync-2-dev/commit/57a86862ddea3c9887be7a0623fe88691ec0680d)) +* **prover-fri:** added leaf layer proof gen and used cached setup data ([#2005](https://github.com/matter-labs/zksync-2-dev/issues/2005)) ([7512769](https://github.com/matter-labs/zksync-2-dev/commit/75127696d3aef473423d252c17fc1fa9dceed563)) +* **setup-data:** add logic for generating VK's and setup-data for node+scheduler circuit ([#2035](https://github.com/matter-labs/zksync-2-dev/issues/2035)) ([d627826](https://github.com/matter-labs/zksync-2-dev/commit/d627826ce64d08c44fc83744c1c6ae464418db3a)) +* **test_node:** Added ability to fetch & apply mainnet/testnet transaction ([#2012](https://github.com/matter-labs/zksync-2-dev/issues/2012)) ([90dd419](https://github.com/matter-labs/zksync-2-dev/commit/90dd41976a3a73eb7ea4158fc86c762d31fd507b)) +* **witness-gen:** add impl for scheduler witness-gen ([#2051](https://github.com/matter-labs/zksync-2-dev/issues/2051)) ([f22704c](https://github.com/matter-labs/zksync-2-dev/commit/f22704cc4c30d8928996c8db652c47622c2890a7)) +* **witness-gen:** impl node witness-gen ([#1991](https://github.com/matter-labs/zksync-2-dev/issues/1991)) ([4118022](https://github.com/matter-labs/zksync-2-dev/commit/4118022cba3f205f9b57e0cc8fa3103ac8bc3026)) + + +### Bug Fixes + +* **api:** unconditionally allow getLogs for single block 
([#2039](https://github.com/matter-labs/zksync-2-dev/issues/2039)) ([70dfb19](https://github.com/matter-labs/zksync-2-dev/commit/70dfb19b889b9f90bd5283ef532dca494da57e0a)) +* **eth-sender:** fix next nonce loading ([#2030](https://github.com/matter-labs/zksync-2-dev/issues/2030)) ([2b639ac](https://github.com/matter-labs/zksync-2-dev/commit/2b639ac56fa831628773e7720c16426f488cc9db)) +* **external node:** Make sure that batch status updater progress is processed ([#2024](https://github.com/matter-labs/zksync-2-dev/issues/2024)) ([8ed95c5](https://github.com/matter-labs/zksync-2-dev/commit/8ed95c52962b49d4394e951f41e32ac67c7b832d)) +* make tx_index_in_l1_batch optional ([#2053](https://github.com/matter-labs/zksync-2-dev/issues/2053)) ([c0972f6](https://github.com/matter-labs/zksync-2-dev/commit/c0972f6ccf99b4790d97c1a55af2eb87b812efbd)) +* **prover:** Add more traces for troubleshooting prover startup ([#2031](https://github.com/matter-labs/zksync-2-dev/issues/2031)) ([9c7e832](https://github.com/matter-labs/zksync-2-dev/commit/9c7e832f4f9cbf6dba311f3a105afbc07ef38863)) +* **prover:** Make socket_listener tokio compliant ([#2049](https://github.com/matter-labs/zksync-2-dev/issues/2049)) ([3c7fa82](https://github.com/matter-labs/zksync-2-dev/commit/3c7fa8212126a2fec0537bde0bd210a5f6598643)) +* **prover:** Split logging and sentry, add logging to prover subsystems and remove unnecessary traces ([#2033](https://github.com/matter-labs/zksync-2-dev/issues/2033)) ([15538b5](https://github.com/matter-labs/zksync-2-dev/commit/15538b542f708e8f9667f8b2c9e7ce2fa85eba6a)) + + +### Performance Improvements + +* **db:** Cache L1 batch number in `PostgresStorage` ([#2028](https://github.com/matter-labs/zksync-2-dev/issues/2028)) ([092a32c](https://github.com/matter-labs/zksync-2-dev/commit/092a32ced4d10e420e284360e3d2ab8f21eed71a)) + + +### Reverts + +* **contract-verifier:** vyper contract verification ([#2041](https://github.com/matter-labs/zksync-2-dev/issues/2041)) ([#2057](https://github.com/matter-labs/zksync-2-dev/issues/2057)) ([c263643](https://github.com/matter-labs/zksync-2-dev/commit/c263643d3dcc1bc34588ff7607537ef0f82377a4)) + +## [5.5.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.5.0...v5.5.1) (2023-06-22) + + +### Bug Fixes + +* **state-keeper:** Do not treat default CF as obsolete ([#2017](https://github.com/matter-labs/zksync-2-dev/issues/2017)) ([8b53210](https://github.com/matter-labs/zksync-2-dev/commit/8b53210f1a587bd908e3dfe5506ba99e2c61fdc6)) + +## [5.5.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.4.1...v5.5.0) (2023-06-22) + + +### Features + +* **external node:** create a single method to fetch all miniblock data required ([#1999](https://github.com/matter-labs/zksync-2-dev/issues/1999)) ([e4912f1](https://github.com/matter-labs/zksync-2-dev/commit/e4912f1a427ce0f46ccabb122f15a54650f9ec02)) +* **prover-setup-data:** added binary to generate prover setup data ([#1954](https://github.com/matter-labs/zksync-2-dev/issues/1954)) ([d3773d4](https://github.com/matter-labs/zksync-2-dev/commit/d3773d435c18434c8f39515eb35021fa74428d69)) + + +### Bug Fixes + +* **merkle tree:** Fix opening RocksDB with obsolete CFs ([#2007](https://github.com/matter-labs/zksync-2-dev/issues/2007)) ([667fe4c](https://github.com/matter-labs/zksync-2-dev/commit/667fe4ce14a09609c1c3cf7b34c26fdc488ac6b3)) + + +### Performance Improvements + +* **merkle tree:** Prune old tree versions ([#1984](https://github.com/matter-labs/zksync-2-dev/issues/1984)) 
([55ddb20](https://github.com/matter-labs/zksync-2-dev/commit/55ddb208a9325e3cfbe28917a841a4773cc88066)) + +## [5.4.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.4.0...v5.4.1) (2023-06-21) + + +### Bug Fixes + +* **api:** Acquire VM permit on the method handler level ([#1997](https://github.com/matter-labs/zksync-2-dev/issues/1997)) ([5701593](https://github.com/matter-labs/zksync-2-dev/commit/570159317d0ce1e1b5694e6e1f5dfacf3e7f92af)) + +## [5.4.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.3.0...v5.4.0) (2023-06-20) + + +### Features + +* **eth:** use `finalized` block tag ([#1981](https://github.com/matter-labs/zksync-2-dev/issues/1981)) ([8e83e42](https://github.com/matter-labs/zksync-2-dev/commit/8e83e426992c32d763c019e80778aeeab544f6ce)) +* **fri-vk:** added logic for generating recursive vk ([#1987](https://github.com/matter-labs/zksync-2-dev/issues/1987)) ([4d3f07e](https://github.com/matter-labs/zksync-2-dev/commit/4d3f07e766c0c70e83c2b18ee648a37d6e3fe449)) +* **testing:** In memory node with forking ([#1989](https://github.com/matter-labs/zksync-2-dev/issues/1989)) ([79820b5](https://github.com/matter-labs/zksync-2-dev/commit/79820b59f9569bba22538522def2d07214a9be32)) +* **witness-gen:** added impl for leaf aggregation witness-gen ([#1985](https://github.com/matter-labs/zksync-2-dev/issues/1985)) ([033fb73](https://github.com/matter-labs/zksync-2-dev/commit/033fb73d794b157fa3a7766f8a2cc029fedebc52)) + + +### Bug Fixes + +* **prover:** Fix tokio usage in prover ([#1998](https://github.com/matter-labs/zksync-2-dev/issues/1998)) ([c905497](https://github.com/matter-labs/zksync-2-dev/commit/c905497e6e650dad6394da397d3cb3d1691c536e)) + +## [5.3.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.2.1...v5.3.0) (2023-06-16) + + +### Features + +* **api:** Implement concurrent VM limiter ([#1982](https://github.com/matter-labs/zksync-2-dev/issues/1982)) ([c818fec](https://github.com/matter-labs/zksync-2-dev/commit/c818feccd63674bb45d0b0ac293cc5ee76fcd63d)) +* **prover:** integrate new prover for basic circuit ([#1965](https://github.com/matter-labs/zksync-2-dev/issues/1965)) ([7d63db7](https://github.com/matter-labs/zksync-2-dev/commit/7d63db7122619d36b3af92b28ae85f130284a0ea)) +* **witness-gen:** enable basic circuit witness-gen by copying input to shadow tables ([#1970](https://github.com/matter-labs/zksync-2-dev/issues/1970)) ([1c818a2](https://github.com/matter-labs/zksync-2-dev/commit/1c818a28eac7a81283ba3b890340707ac65c6fb3)) + + +### Bug Fixes + +* **circuits:** mark_witness_job_as_failed must use different dbs ([#1974](https://github.com/matter-labs/zksync-2-dev/issues/1974)) ([143f319](https://github.com/matter-labs/zksync-2-dev/commit/143f3195d3393312364a60a19fc4bbf5e78f5212)) +* **eth-sender:** simplify logic for getting executed blocks ([#1973](https://github.com/matter-labs/zksync-2-dev/issues/1973)) ([2781006](https://github.com/matter-labs/zksync-2-dev/commit/2781006c918553e54f20afdbe80cca7d64ecc389)) +* **loadtest:** Make fail fast semantics optional ([#1983](https://github.com/matter-labs/zksync-2-dev/issues/1983)) ([ec4037c](https://github.com/matter-labs/zksync-2-dev/commit/ec4037ca0d9dc148eda3ca9e04380302574e03d8)) +* **manifests:** Fix Package Manifests ([#1947](https://github.com/matter-labs/zksync-2-dev/issues/1947)) ([57a66e4](https://github.com/matter-labs/zksync-2-dev/commit/57a66e4487caef59fd3836535ad604da5f1d633f)) + +## [5.2.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.2.0...v5.2.1) (2023-06-15) + + +### Bug Fixes + 
+* **db:** add missing indices ([#1966](https://github.com/matter-labs/zksync-2-dev/issues/1966)) ([1580e89](https://github.com/matter-labs/zksync-2-dev/commit/1580e893609d5f1e813443f54e3172f3704d6626)) +* **eth-sender:** fix get_ready_for_execute_blocks if no ready blocks ([#1972](https://github.com/matter-labs/zksync-2-dev/issues/1972)) ([cd9262a](https://github.com/matter-labs/zksync-2-dev/commit/cd9262ac2477e40b2b3156505ee22b0b90f186ab)) +* **external node:** Fix external_node_synced metric ([#1967](https://github.com/matter-labs/zksync-2-dev/issues/1967)) ([bacb3f5](https://github.com/matter-labs/zksync-2-dev/commit/bacb3f5f4fcce651dfffed7cf63436f3fa680b8e)) +* **loadtest:** cast to u128, not to u64 to avoid overflow ([#1969](https://github.com/matter-labs/zksync-2-dev/issues/1969)) ([90f73c0](https://github.com/matter-labs/zksync-2-dev/commit/90f73c0fb89888624e13c8c13f7a2aa6ee29522d)) +* **witness-gen:** use both db in witness-gen ([#1971](https://github.com/matter-labs/zksync-2-dev/issues/1971)) ([79f1843](https://github.com/matter-labs/zksync-2-dev/commit/79f1843f28f97d3da074c580623f5bbf4b12f6aa)) + +## [5.2.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.1.0...v5.2.0) (2023-06-14) + + +### Features + +* **loadtest:** enhance loadtest observability for partners in DBS ([#1948](https://github.com/matter-labs/zksync-2-dev/issues/1948)) ([d3e4688](https://github.com/matter-labs/zksync-2-dev/commit/d3e4688e870d3414c211ecd2d70bdda4dc0fd40a)) +* Make DAL interface async ([#1938](https://github.com/matter-labs/zksync-2-dev/issues/1938)) ([0e078ca](https://github.com/matter-labs/zksync-2-dev/commit/0e078ca3f7da9e218b952d7a9d307b927847c914)) +* **merkle tree:** Collect stats on inserted node level ([#1964](https://github.com/matter-labs/zksync-2-dev/issues/1964)) ([ecf474d](https://github.com/matter-labs/zksync-2-dev/commit/ecf474dbe2b72b31c34e340c2b79e060a96c560e)) +* **prover:** Split prover subsystems into its own db under main branch ([#1951](https://github.com/matter-labs/zksync-2-dev/issues/1951)) ([b0d329d](https://github.com/matter-labs/zksync-2-dev/commit/b0d329def1791e57a11e1fd79eb38c560f17b74c)) +* vm 1.3.3 update + initial witness generator for 1.4 ([#1928](https://github.com/matter-labs/zksync-2-dev/issues/1928)) ([46e260b](https://github.com/matter-labs/zksync-2-dev/commit/46e260b7b5a6b2940e4e6002d58d05166dbf0a62)) +* **witness-gen:** basic-circuit witness-gen for FRI prover ([#1937](https://github.com/matter-labs/zksync-2-dev/issues/1937)) ([5b5fb28](https://github.com/matter-labs/zksync-2-dev/commit/5b5fb28cf02be4704428a92ffbf898448b367e2b)) + + +### Bug Fixes + +* **api:** use all tokens in api ([#1959](https://github.com/matter-labs/zksync-2-dev/issues/1959)) ([cc11149](https://github.com/matter-labs/zksync-2-dev/commit/cc11149c14484dd4da8397311cbd187548c7d371)) +* **db:** `storage_logs_contract_address_tx_hash_idx` index ([#1956](https://github.com/matter-labs/zksync-2-dev/issues/1956)) ([6cc5edd](https://github.com/matter-labs/zksync-2-dev/commit/6cc5eddd191b4304fbe8f524745614ceee9a8cae)) +* **eth-sender:** Do not send execute tx with a gap between batches ([#1934](https://github.com/matter-labs/zksync-2-dev/issues/1934)) ([ab8dc59](https://github.com/matter-labs/zksync-2-dev/commit/ab8dc59e7f7ad9ee4fe0aa053a111855c1f91c04)) +* **eth-sender:** Move getting base system contracts to the loop itera… ([#1958](https://github.com/matter-labs/zksync-2-dev/issues/1958)) 
([292122a](https://github.com/matter-labs/zksync-2-dev/commit/292122a89d23b75bb126abcf5b96bc8a1e1c71ac)) +* **external node:** Separate batch status updater and fetcher ([#1961](https://github.com/matter-labs/zksync-2-dev/issues/1961)) ([2c59d4c](https://github.com/matter-labs/zksync-2-dev/commit/2c59d4c2e92a5e6716fb131f537a6ddaf73297ab)) +* **metrics:** switch to pull based metrics in prover & synthesizer ([#1918](https://github.com/matter-labs/zksync-2-dev/issues/1918)) ([e634c73](https://github.com/matter-labs/zksync-2-dev/commit/e634c73b21587bc0aecbd4e43b17ff711d346ad1)) + +## [5.1.0](https://github.com/matter-labs/zksync-2-dev/compare/v5.0.1...v5.1.0) (2023-06-07) + + +### Features + +* **contract-verifier:** add zksolc v1.3.11 ([#1936](https://github.com/matter-labs/zksync-2-dev/issues/1936)) ([4a13986](https://github.com/matter-labs/zksync-2-dev/commit/4a139868217414bcf3aa77c75aac05722ea4a096)) +* **explorer-api:** include miniblock timestamp to explorer api ([#1894](https://github.com/matter-labs/zksync-2-dev/issues/1894)) ([1e86627](https://github.com/matter-labs/zksync-2-dev/commit/1e86627c70823d557ead871696a726b4aee29bec)) +* **external node:** Explicitly state that EN is alpha ([#1917](https://github.com/matter-labs/zksync-2-dev/issues/1917)) ([b81dccd](https://github.com/matter-labs/zksync-2-dev/commit/b81dccd8c076d7c9e43f0bebd44eabd88096b054)) +* **external node:** Prepare docker image for public use ([#1906](https://github.com/matter-labs/zksync-2-dev/issues/1906)) ([1fcf5b5](https://github.com/matter-labs/zksync-2-dev/commit/1fcf5b543bfea63ad305eb868487f53ad0ba223a)) +* **loadtest:** run loadtest on stage2 daily ([#1852](https://github.com/matter-labs/zksync-2-dev/issues/1852)) ([196d9e4](https://github.com/matter-labs/zksync-2-dev/commit/196d9e40ec2a57075b061beba06e675c78564b6a)) +* **merkle tree:** Add tag to tree manifest ([#1873](https://github.com/matter-labs/zksync-2-dev/issues/1873)) ([cd18a63](https://github.com/matter-labs/zksync-2-dev/commit/cd18a639a262c8ffea2d3e55f80d8b454fe22a1a)) +* **vk:** added vk generator for new prover ([#1931](https://github.com/matter-labs/zksync-2-dev/issues/1931)) ([669e976](https://github.com/matter-labs/zksync-2-dev/commit/669e97626dc63a2f566f16957dd61ac94eabc226)) + + +### Bug Fixes + +* **external node checker:** Fix Sync Bug ([#1924](https://github.com/matter-labs/zksync-2-dev/issues/1924)) ([1a37f6b](https://github.com/matter-labs/zksync-2-dev/commit/1a37f6ba87c8a3aa0c0e30682db9e8a57b3c462a)) +* Remove binary search from logs ([#1911](https://github.com/matter-labs/zksync-2-dev/issues/1911)) ([f3553f5](https://github.com/matter-labs/zksync-2-dev/commit/f3553f57c3f40e292e51ab18cc81ba3fdac20dbb)) + + +### Performance Improvements + +* Box storage and event logs ([#1887](https://github.com/matter-labs/zksync-2-dev/issues/1887)) ([13e7078](https://github.com/matter-labs/zksync-2-dev/commit/13e70780704037cdb32ab91427ef2bb1d6a2d622)) +* improve performance of repeated far calls ([#1902](https://github.com/matter-labs/zksync-2-dev/issues/1902)) ([b9b96e7](https://github.com/matter-labs/zksync-2-dev/commit/b9b96e7d230fdbd705236425a65a8e698cdfdbb9)) + ## [5.0.1](https://github.com/matter-labs/zksync-2-dev/compare/v5.0.0...v5.0.1) (2023-05-30) diff --git a/core/bin/admin-tools/Cargo.toml b/core/bin/admin-tools/Cargo.toml index 23c3bdcb9b25..9748a57b0b54 100644 --- a/core/bin/admin-tools/Cargo.toml +++ b/core/bin/admin-tools/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = 
"https://zksync.io/" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] description = "Admin tools CLI for zkSync" diff --git a/core/bin/admin-tools/src/application.rs b/core/bin/admin-tools/src/application.rs index 1dcb1a1ae741..628257c11145 100644 --- a/core/bin/admin-tools/src/application.rs +++ b/core/bin/admin-tools/src/application.rs @@ -24,7 +24,7 @@ pub fn create_app<'a>(profile: &Option) -> Result, AppError> { let tokio = tokio::runtime::Runtime::new().map_err(|x| AppError::Init(InitError::IO(x)))?; - let db = zksync_dal::StorageProcessor::establish_connection_blocking(true); + let db = tokio.block_on(zksync_dal::StorageProcessor::establish_connection(true)); let invocation = std::process::Command::new("stty") .arg("-f") diff --git a/core/bin/admin-tools/src/blocks.rs b/core/bin/admin-tools/src/blocks.rs index 7a585b0a5d54..7a03a7a41913 100644 --- a/core/bin/admin-tools/src/blocks.rs +++ b/core/bin/admin-tools/src/blocks.rs @@ -81,18 +81,24 @@ pub fn get_block_info(id: L1BatchNumber, app: &mut App) -> Result Result { - let stats = app.db.prover_dal().get_prover_jobs_stats(); - let stats_extended = app - .db - .prover_dal() - .get_extended_stats() + let handle = app.tokio.handle(); + let stats = handle.block_on(app.db.prover_dal().get_prover_jobs_stats()); + let stats_extended = handle + .block_on(app.db.prover_dal().get_extended_stats()) .map_err(|x| AppError::Db(x.to_string()))?; Ok(ProverStats { @@ -140,9 +139,9 @@ pub fn print_stats(stats: &ProverStats, term_width: u32) -> Result<(), AppError> } pub fn get_jobs(app: &mut App, opts: GetProverJobsParams) -> Result, AppError> { - app.db - .prover_dal() - .get_jobs(opts) + let handle = app.tokio.handle(); + handle + .block_on(app.db.prover_dal().get_jobs(opts)) .map_err(|x| AppError::Db(x.to_string())) } diff --git a/core/bin/circuit_synthesizer/Cargo.lock b/core/bin/circuit_synthesizer/Cargo.lock index 91f957301957..38c65a231498 100644 --- a/core/bin/circuit_synthesizer/Cargo.lock +++ b/core/bin/circuit_synthesizer/Cargo.lock @@ -77,7 +77,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -91,6 +91,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "aho-corasick" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -111,14 +126,14 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#d28e9a53574341924ebf7f648b2128db8e65b72c" dependencies = [ 
"bellman_ce", "cfg-if 1.0.0", @@ -143,15 +158,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -170,69 +185,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "async-channel" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" -dependencies = [ - "async-lock", - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.12.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" -dependencies = [ - "async-lock", - "autocfg 1.1.0", - "concurrent-queue", - "futures-lite", - "libc", - "log", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "windows-sys 0.42.0", -] +checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" [[package]] name = "async-lock" @@ -243,68 +198,11 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-native-tls" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9e7a929bd34c68a82d58a4de7f86fffdaf97fb2af850162a7bb19dd7269b33" -dependencies = [ - "async-std", - "native-tls", - "thiserror", - "url", -] - -[[package]] -name = "async-process" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" -dependencies = [ - "async-io", - "async-lock", - "autocfg 1.1.0", - "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "libc", - "signal-hook", - "windows-sys 0.42.0", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils 0.8.15", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - 
"kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -313,30 +211,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] -[[package]] -name = "async-task" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" - [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -348,12 +240,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-waker" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" - [[package]] name = "atty" version = "0.2.14" @@ -409,9 +295,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -433,14 +319,14 @@ name = "bellman_ce" version = "0.3.2" source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bit-vec", "blake2s_const", "blake2s_simd", "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.27", + "futures 0.3.28", "hex", "lazy_static", "num_cpus", @@ -472,25 +358,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2 1.0.52", - "quote 1.0.26", - "regex", - "rustc-hash", - "shlex", -] - [[package]] name = "bit-vec" version = "0.6.3" @@ -508,9 +375,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.2.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" +checksum = 
"6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" [[package]] name = "bitvec" @@ -541,7 +408,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -611,25 +478,11 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" -[[package]] -name = "blocking" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" -dependencies = [ - "async-channel", - "async-lock", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", -] - [[package]] name = "bstr" -version = "1.3.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "serde", @@ -637,9 +490,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -659,34 +512,11 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -dependencies = [ - "jobserver", -] - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] [[package]] name = "cfg-if" @@ -702,13 +532,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "rustc-serialize", "serde", @@ -734,17 +564,6 @@ dependencies = [ "bellman_ce", ] -[[package]] -name = "clang-sys" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "2.34.0" @@ -794,25 +613,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - -[[package]] -name = "concurrent-queue" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" -dependencies = [ - "crossbeam-utils 0.8.15", -] - [[package]] name = "const-oid" version = "0.7.1" @@ -849,15 +649,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -898,11 +698,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.14", + "crossbeam-epoch 0.9.15", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -917,12 +717,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -943,8 +743,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -964,14 +764,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", - "memoffset 0.8.0", + "crossbeam-utils 0.8.16", + "memoffset 0.9.0", "scopeguard", ] @@ -993,7 +793,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1009,9 +809,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -1087,22 +887,24 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#5363449e157ea68ffc86b9a146448f829ec03de9" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] [[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +name = "cs_derive" +version = "0.1.0" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ - "quote 1.0.26", + "proc-macro-error", + "proc-macro2 1.0.60", + "quote 1.0.28", + "serde", "syn 1.0.109", ] @@ -1117,56 +919,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix", - "windows-sys 0.45.0", -] - -[[package]] -name = "cxx" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" -dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "windows-sys 0.48.0", ] [[package]] @@ -1187,8 +945,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -1200,7 +958,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -1241,8 +999,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -1253,8 +1011,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustc_version", "syn 1.0.109", ] @@ -1270,9 +1028,9 @@ dependencies = [ [[package]] name = 
"digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1335,7 +1093,7 @@ dependencies = [ "base16ct", "crypto-bigint 0.4.9", "der 0.6.1", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array", "group", @@ -1392,13 +1150,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1500,8 +1258,8 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] @@ -1559,9 +1317,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1616,9 +1374,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1631,9 +1389,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1641,15 +1399,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1670,47 +1428,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" - -[[package]] -name = "futures-lite" -version = "1.12.0" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - 
"fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -1724,9 +1467,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1743,9 +1486,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1764,9 +1507,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1775,15 +1518,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" - -[[package]] -name = "glob" -version = "0.3.1" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "globset" @@ -1791,7 +1528,7 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ - "aho-corasick", + "aho-corasick 0.7.20", "bstr", "fnv", "log", @@ -1845,11 +1582,11 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.9.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.10.0" +source = 
"git+https://github.com/yoshidan/google-cloud-rust?branch=main#d747613dc8e1a70919005f27577c1fcb7676a0b9" dependencies = [ "async-trait", - "base64 0.21.0", + "base64 0.21.2", "google-cloud-metadata", "google-cloud-token", "home", @@ -1858,7 +1595,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "tokio", "tracing", "urlencoding", @@ -1866,8 +1603,8 @@ dependencies = [ [[package]] name = "google-cloud-default" -version = "0.1.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.4.0" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#d747613dc8e1a70919005f27577c1fcb7676a0b9" dependencies = [ "async-trait", "google-cloud-auth", @@ -1878,7 +1615,7 @@ dependencies = [ [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#d747613dc8e1a70919005f27577c1fcb7676a0b9" dependencies = [ "reqwest", "thiserror", @@ -1887,11 +1624,11 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.10.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.11.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#d747613dc8e1a70919005f27577c1fcb7676a0b9" dependencies = [ "async-stream", - "base64 0.21.0", + "base64 0.21.2", "bytes", "futures-util", "google-cloud-token", @@ -1906,7 +1643,7 @@ dependencies = [ "serde_json", "sha2 0.10.6", "thiserror", - "time 0.3.20", + "time 0.3.22", "tokio", "tracing", "url", @@ -1914,8 +1651,8 @@ dependencies = [ [[package]] name = "google-cloud-token" -version = "0.1.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.1.1" +source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#d747613dc8e1a70919005f27577c1fcb7676a0b9" dependencies = [ "async-trait", ] @@ -1933,9 +1670,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -1946,15 +1683,15 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", ] [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -2087,16 +1824,16 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "home" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ 
-2152,9 +1889,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -2183,13 +1920,26 @@ dependencies = [ "http", "hyper", "log", - "rustls", + "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", "webpki-roots", ] +[[package]] +name = "hyper-rustls" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +dependencies = [ + "http", + "hyper", + "rustls 0.21.2", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -2217,26 +1967,25 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2258,9 +2007,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2299,16 +2048,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -2325,19 +2074,20 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = 
"12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "ipnetwork" @@ -2347,14 +2097,14 @@ checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2378,20 +2128,11 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" -[[package]] -name = "jobserver" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -2402,7 +2143,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "futures-executor", "futures-util", "log", @@ -2447,8 +2188,8 @@ dependencies = [ "soketto", "thiserror", "tokio", - "tokio-rustls", - "tokio-util 0.7.7", + "tokio-rustls 0.23.4", + "tokio-util 0.7.8", "tracing", "webpki-roots", ] @@ -2460,7 +2201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" dependencies = [ "anyhow", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "async-lock", "async-trait", "beef", @@ -2490,7 +2231,7 @@ checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" dependencies = [ "async-trait", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.2", "jsonrpsee-core", "jsonrpsee-types", "rustc-hash", @@ -2509,8 +2250,8 @@ checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.1", "proc-macro-crate", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2531,7 +2272,7 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower", "tracing", ] @@ -2575,11 +2316,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -2601,22 +2342,13 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] -[[package]] -name = "kv-log-macro" -version 
= "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -2626,91 +2358,41 @@ dependencies = [ "spin", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.140" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" - -[[package]] -name = "libloading" -version = "0.7.4" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if 1.0.0", - "winapi", -] +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" - -[[package]] -name = "librocksdb-sys" -version = "0.6.1+6.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", -] - -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "local-ip-address" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -2718,13 +2400,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", - "value-bag", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "mach" @@ -2790,9 
+2468,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -2805,7 +2483,7 @@ checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ "ahash", "metrics-macros", - "portable-atomic", + "portable-atomic 0.3.20", ] [[package]] @@ -2820,7 +2498,7 @@ dependencies = [ "metrics", "metrics-util", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta", "thiserror", "tokio", @@ -2833,8 +2511,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2844,22 +2522,22 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", "hashbrown 0.12.3", "metrics", "num_cpus", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta", "sketches-ddsketch", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -2888,14 +2566,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2924,12 +2601,27 @@ dependencies = [ [[package]] name = "neli" -version = "0.5.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" dependencies = [ "byteorder", "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +dependencies = [ + "either", + "proc-macro2 1.0.60", + "quote 1.0.28", + "serde", + "syn 1.0.109", ] [[package]] @@ -3075,8 +2767,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3148,18 +2840,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" 
+checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3169,9 +2861,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -3184,13 +2876,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3201,11 +2893,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "pkg-config", @@ -3219,7 +2910,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "futures-channel", "futures-executor", "futures-util", @@ -3251,7 +2942,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.27", + "futures 0.3.28", "futures-util", "http", "opentelemetry", @@ -3276,9 +2967,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" +checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" dependencies = [ "log", "serde", @@ -3334,7 +3025,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -3349,17 +3040,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - [[package]] name = "parking_lot" version = "0.11.2" @@ -3378,7 +3063,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -3390,22 +3075,22 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets", ] [[package]] @@ -3446,12 +3131,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -3472,15 +3151,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -3488,9 +3167,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -3498,22 +3177,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -3532,22 +3211,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] 
name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3596,31 +3275,24 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] -name = "polling" -version = "2.6.0" +name = "portable-atomic" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" dependencies = [ - "autocfg 1.1.0", - "bitflags 1.3.2", - "cfg-if 1.0.0", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.45.0", + "portable-atomic 1.3.3", ] [[package]] name = "portable-atomic" -version = "0.3.19" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -3658,8 +3330,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -3670,8 +3342,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "version_check", ] @@ -3692,9 +3364,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -3748,8 +3420,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3766,17 +3438,17 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#d28e9a53574341924ebf7f648b2128db8e65b72c" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "log", "num_cpus", "rand 0.4.6", "serde", "serde_json", - "zkevm_test_harness", + "zkevm_test_harness 1.3.2", ] [[package]] @@ -3785,7 +3457,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "libc", "mach", "once_cell", @@ -3806,11 +3478,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", ] [[package]] @@ -3933,7 +3605,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -4032,9 +3704,9 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "num_cpus", ] @@ -4056,26 +3728,35 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.2" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce168fea28d3e05f158bda4576cf0c844d5045bc2cc3620fa0292ed5bb5814c" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.2", "memchr", - "regex-syntax", + "regex-syntax 0.7.2", ] [[package]] @@ -4084,22 +3765,28 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -4108,7 +3795,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.24.0", "hyper-tls", "ipnet", "js-sys", @@ -4119,15 +3806,15 @@ dependencies = [ 
"once_cell", "percent-encoding", "pin-project-lite", - "rustls", + "rustls 0.21.2", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.7", + "tokio-rustls 0.24.1", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", @@ -4144,7 +3831,7 @@ version = "0.4.1" source = "git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" dependencies = [ "addchain", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "blake2 0.10.6", "byteorder", "franklin-crypto", @@ -4205,16 +3892,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rocksdb" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rsa" version = "0.6.1" @@ -4222,7 +3899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" dependencies = [ "byteorder", - "digest 0.10.6", + "digest 0.10.7", "num-bigint-dig", "num-integer", "num-iter", @@ -4237,9 +3914,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -4270,16 +3947,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4294,11 +3971,23 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -4312,7 +4001,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -4351,12 +4050,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.5.0" @@ -4427,9 +4120,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -4440,9 +4133,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -4462,9 +4155,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "sentry" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" +checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" dependencies = [ "httpdate", "native-tls", @@ -4474,15 +4167,16 @@ dependencies = [ "sentry-core", "sentry-debug-images", "sentry-panic", + "sentry-tracing", "tokio", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" +checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" dependencies = [ "backtrace", "once_cell", @@ -4492,9 +4186,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" +checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" dependencies = [ "hostname", "libc", @@ -4506,9 +4200,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" +checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" dependencies = [ "once_cell", "rand 0.8.5", @@ -4519,9 +4213,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" dependencies = [ "findshlibs", "once_cell", @@ -4530,56 +4224,68 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.30.0" +version = "0.31.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" +dependencies = [ + "sentry-backtrace", + "sentry-core", +] + +[[package]] +name = "sentry-tracing" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" +checksum = 
"dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" dependencies = [ "sentry-backtrace", "sentry-core", + "tracing-core", + "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" +checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" dependencies = [ "debugid", - "getrandom 0.2.8", + "getrandom 0.2.10", "hex", "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa 1.0.6", @@ -4616,8 +4322,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4642,7 +4348,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4666,7 +4372,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4687,7 +4393,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -4700,22 +4406,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shlex" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" - -[[package]] -name = "signal-hook" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -4731,7 +4421,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -4744,14 +4434,14 @@ dependencies = [ "num-bigint 
0.4.3", "num-traits", "thiserror", - "time 0.3.20", + "time 0.3.22", ] [[package]] name = "sketches-ddsketch" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" @@ -4786,7 +4476,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.27", + "futures 0.3.28", "http", "httparse", "log", @@ -4862,9 +4552,9 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "dirs", "either", "futures-channel", @@ -4895,6 +4585,7 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", + "tokio-stream", "url", "whoami", ] @@ -4910,8 +4601,8 @@ dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "serde_json", "sha2 0.9.9", @@ -4927,9 +4618,10 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" dependencies = [ - "async-native-tls", - "async-std", "native-tls", + "once_cell", + "tokio", + "tokio-native-tls", ] [[package]] @@ -4979,8 +4671,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -5000,8 +4692,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -5029,29 +4721,55 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "sync_vm" version = "1.3.2" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#5363449e157ea68ffc86b9a146448f829ec03de9" +dependencies = [ + "arrayvec 0.7.3", + "cs_derive 0.1.0 (git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2)", + "derivative", + "franklin-crypto", + "hex", + "itertools", + "num-bigint 0.4.3", + "num-derive 0.3.3", + "num-integer", + "num-traits", + "once_cell", + "rand 0.4.6", + "rescue_poseidon", + "serde", + "sha2 0.10.6", + "sha3 0.10.6", + "smallvec", + "zk_evm 1.3.2", + "zkevm_opcode_defs", +] + +[[package]] +name = "sync_vm" +version = "1.3.3" +source = 
"git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ - "arrayvec 0.7.2", - "cs_derive", + "arrayvec 0.7.3", + "cs_derive 0.1.0 (git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3)", "derivative", "franklin-crypto", "hex", @@ -5067,7 +4785,7 @@ dependencies = [ "sha2 0.10.6", "sha3 0.10.6", "smallvec", - "zk_evm", + "zk_evm 1.3.3", "zkevm_opcode_defs", ] @@ -5079,15 +4797,16 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg 1.1.0", "cfg-if 1.0.0", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -5101,12 +4820,12 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" +checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -5121,22 +4840,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -5161,9 +4880,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa 1.0.6", "serde", @@ -5173,15 +4892,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -5221,9 +4940,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg 1.1.0", "bytes", @@ -5254,9 +4973,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 2.0.12", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -5275,16 +4994,26 @@ version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls", + "rustls 0.20.8", "tokio", "webpki", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.2", + "tokio", +] + [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -5307,9 +5036,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -5322,15 +5051,15 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" -version = "0.19.6" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "toml_datetime", @@ -5374,9 +5103,9 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", "prost-build", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -5394,7 +5123,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower-layer", "tower-service", "tracing", @@ -5427,20 +5156,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -5493,9 +5222,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -5506,7 +5235,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.20", + "time 0.3.22", "tracing", "tracing-core", "tracing-log", @@ -5563,15 +5292,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -5614,11 +5343,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "native-tls", "once_cell", @@ -5627,12 +5356,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", "serde", ] @@ -5645,11 +5374,11 @@ checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "serde", ] @@ -5659,16 +5388,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] - [[package]] name = "vcpkg" version = "0.2.15" @@ -5702,41 +5421,12 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "vm" -version = "0.1.0" -dependencies = [ - "hex", - "itertools", - "metrics", - "once_cell", - "thiserror", - "tracing", - "vlog", - "zk_evm", - "zkevm-assembly", - "zksync_config", - "zksync_contracts", - 
"zksync_crypto", - "zksync_state", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -5760,9 +5450,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5770,24 +5460,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5797,32 +5487,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" @@ -5839,9 +5529,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" 
dependencies = [ "js-sys", "wasm-bindgen", @@ -5853,13 +5543,13 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "base64 0.13.1", "bytes", "derive_more", "ethabi", "ethereum-types", - "futures 0.3.27", + "futures 0.3.28", "futures-timer", "headers", "hex", @@ -5949,6 +5639,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-sys" version = "0.42.0" @@ -5964,37 +5663,13 @@ dependencies = [ "windows_x86_64_msvc 0.42.2", ] -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets", ] [[package]] @@ -6098,9 +5773,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -6122,9 +5797,9 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zk_evm" @@ -6139,10 +5814,36 @@ dependencies = [ "zkevm_opcode_defs", ] +[[package]] +name = "zk_evm" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.3#9a1eaa98acb9e3280dbbde5b132cbf64e15fe96e" +dependencies = [ + "anyhow", + "lazy_static", + "num 0.4.0", + "serde", + "serde_json", + "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs", +] + +[[package]] +name = "zk_evm_abstractions" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zk_evm_abstractions.git#31361360123b4f2532ab345522c9b19510f04c31" +dependencies = [ + "anyhow", + "serde", + "static_assertions", + "zkevm_opcode_defs", +] + [[package]] name = "zkevm-assembly" version = "1.3.2" -source = 
"git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#a5f2c38305fa672ec23cf3d4d804eb50e591288c" dependencies = [ "env_logger 0.9.3", "hex", @@ -6164,7 +5865,7 @@ name = "zkevm_opcode_defs" version = "1.3.2" source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags 2.2.1", + "bitflags 2.3.2", "blake2 0.10.6", "ethereum-types", "k256", @@ -6176,7 +5877,34 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.3.2" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#5b6a44d42b12b944c69fa1b1008b643af40ef6f0" +dependencies = [ + "bincode", + "circuit_testing", + "codegen 0.2.0", + "crossbeam 0.8.2", + "derivative", + "env_logger 0.10.0", + "hex", + "num-bigint 0.4.3", + "num-integer", + "num-traits", + "rayon", + "serde", + "serde_json", + "smallvec", + "structopt", + "sync_vm 1.3.2", + "test-log", + "tracing", + "zk_evm 1.3.2", + "zkevm-assembly", +] + +[[package]] +name = "zkevm_test_harness" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.3#363ead7afaac72bd3006c49d501934747781cbb4" dependencies = [ "bincode", "circuit_testing", @@ -6193,10 +5921,10 @@ dependencies = [ "serde_json", "smallvec", "structopt", - "sync_vm", + "sync_vm 1.3.3", "test-log", "tracing", - "zk_evm", + "zk_evm 1.3.3", "zkevm-assembly", ] @@ -6213,7 +5941,7 @@ name = "zksync_circuit_synthesizer" version = "0.1.0" dependencies = [ "ctrlc", - "futures 0.3.27", + "futures 0.3.28", "local-ip-address", "metrics", "prometheus_exporter", @@ -6221,13 +5949,13 @@ dependencies = [ "structopt", "tokio", "vlog", - "zkevm_test_harness", + "zkevm_test_harness 1.3.3", "zksync_config", "zksync_dal", "zksync_object_store", "zksync_prover_utils", "zksync_queued_job_processor", - "zksync_types", + "zksync_utils", ] [[package]] @@ -6278,7 +6006,6 @@ name = "zksync_dal" version = "1.0.0" dependencies = [ "anyhow", - "async-std", "bigdecimal", "bincode", "hex", @@ -6288,14 +6015,13 @@ dependencies = [ "once_cell", "serde_json", "sqlx", + "strum", "thiserror", + "tokio", "vlog", - "vm", "zksync_config", "zksync_contracts", "zksync_health_check", - "zksync_state", - "zksync_storage", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -6304,13 +6030,15 @@ dependencies = [ [[package]] name = "zksync_health_check" version = "0.1.0" +dependencies = [ + "async-trait", +] [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" dependencies = [ "once_cell", - "rayon", "zksync_basic_types", "zksync_crypto", ] @@ -6319,6 +6047,7 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "async-trait", "bincode", "google-cloud-auth", "google-cloud-default", @@ -6336,7 +6065,7 @@ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ "ctrlc", - "futures 0.3.27", + "futures 0.3.28", "metrics", "regex", "reqwest", @@ -6357,32 +6086,6 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "zksync_state" -version = "1.0.0" -dependencies = [ - "metrics", - "vlog", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_storage" -version = "1.0.0" -dependencies = [ - "bincode", - "byteorder", - "num_cpus", - "once_cell", - 
"rocksdb", - "serde", - "vlog", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_types" version = "1.0.0" @@ -6391,23 +6094,19 @@ dependencies = [ "blake2 0.10.6", "chrono", "codegen 0.1.0", - "ethbloom", - "hex", "metrics", "num 0.3.1", "once_cell", "parity-crypto", - "rayon", "rlp", "serde", "serde_json", "serde_with", "strum", "thiserror", - "tiny-keccak 1.5.0", - "zk_evm", + "zk_evm 1.3.3", "zkevm-assembly", - "zkevm_test_harness", + "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -6422,16 +6121,17 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - "futures 0.3.27", + "futures 0.3.28", "hex", "itertools", + "metrics", "num 0.3.1", "reqwest", "serde", "thiserror", "tokio", "vlog", - "zk_evm", + "zk_evm 1.3.3", "zksync_basic_types", ] diff --git a/core/bin/circuit_synthesizer/Cargo.toml b/core/bin/circuit_synthesizer/Cargo.toml index 384f01bec1c1..987158d38fcd 100644 --- a/core/bin/circuit_synthesizer/Cargo.toml +++ b/core/bin/circuit_synthesizer/Cargo.toml @@ -12,18 +12,18 @@ zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_object_store = { path = "../../lib/object_store", version = "1.0" } -zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } -zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } +zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3"} prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["legacy"], default-features=false} structopt = "0.3.26" tokio = { version = "1.23.0", features = ["full"] } -futures = { version = "0.3", features = ["compat"] } +futures = "0.3" ctrlc = { version = "3.1", features = ["termination"] } local-ip-address = "0.5.0" metrics = "0.20" diff --git a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs index 9cfa9ec81829..5f6842592e2d 100644 --- a/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/core/bin/circuit_synthesizer/src/circuit_synthesizer.rs @@ -40,6 +40,7 @@ pub struct CircuitSynthesizer { allowed_circuit_types: Option>, region: String, zone: String, + prover_connection_pool: ConnectionPool, } impl CircuitSynthesizer { @@ -47,6 +48,7 @@ impl CircuitSynthesizer { config: CircuitSynthesizerConfig, prover_groups: ProverGroupConfig, store_factory: &ObjectStoreFactory, + prover_connection_pool: ConnectionPool, ) -> Result { let is_specialized = prover_groups.is_specialized_group_id(config.prover_group_id); let allowed_circuit_types = if is_specialized { @@ -74,11 +76,12 @@ impl CircuitSynthesizer { Ok(Self { config, - blob_store: store_factory.create_store(), + blob_store: store_factory.create_store().await, allowed_circuit_types: allowed_circuit_types .map(|x| x.into_iter().map(|x| x.1).collect()), region: get_region().await, zone: get_zone().await, + prover_connection_pool, }) } @@ -116,21 +119,21 @@ impl JobProcessor for 
CircuitSynthesizer { type JobArtifacts = (ProvingAssembly, u8); const SERVICE_NAME: &'static str = "CircuitSynthesizer"; - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { vlog::trace!( "Attempting to fetch job types: {:?}", self.allowed_circuit_types ); - let mut storage = connection_pool.access_storage_blocking(); + let mut storage = self.prover_connection_pool.access_storage().await; let prover_job = match &self.allowed_circuit_types { - Some(types) => storage - .prover_dal() - .get_next_prover_job_by_circuit_types(types.clone()), - None => storage.prover_dal().get_next_prover_job(), + Some(types) => { + storage + .prover_dal() + .get_next_prover_job_by_circuit_types(types.clone()) + .await + } + None => storage.prover_dal().get_next_prover_job().await, }?; let circuit_key = CircuitKey { @@ -142,6 +145,7 @@ impl JobProcessor for CircuitSynthesizer { let input = self .blob_store .get(circuit_key) + .await .map_err(CircuitSynthesizerError::InputLoadFailed) .unwrap_or_else(|err| panic!("{err:?}")); @@ -150,19 +154,20 @@ impl JobProcessor for CircuitSynthesizer { async fn save_failure( &self, - pool: ConnectionPool, job_id: Self::JobId, _started_at: Instant, error: String, ) { - pool.access_storage_blocking() + self.prover_connection_pool + .access_storage() + .await .prover_dal() - .save_proof_error(job_id, error, self.config.max_attempts); + .save_proof_error(job_id, error, self.config.max_attempts) + .await; } async fn process_job( &self, - _connection_pool: ConnectionPool, job: Self::Job, _started_at: Instant, ) -> JoinHandle { @@ -171,7 +176,6 @@ impl JobProcessor for CircuitSynthesizer { async fn save_result( &self, - pool: ConnectionPool, job_id: Self::JobId, _started_at: Instant, (assembly, circuit_id): Self::JobArtifacts, @@ -194,15 +198,18 @@ impl JobProcessor for CircuitSynthesizer { let mut attempts = 0; while now.elapsed() < self.config.prover_instance_wait_timeout() { - let prover = pool - .access_storage_blocking() + let prover = self + .prover_connection_pool + .access_storage() + .await .gpu_prover_queue_dal() .lock_available_prover( self.config.gpu_prover_queue_timeout(), self.config.prover_group_id, self.region.clone(), self.zone.clone(), - ); + ) + .await; if let Some(address) = prover { let result = send_assembly(job_id, &mut serialized, &address); @@ -210,10 +217,11 @@ impl JobProcessor for CircuitSynthesizer { &result, job_id, &address, - &pool, + &self.prover_connection_pool, self.region.clone(), self.zone.clone(), - ); + ) + .await; if result.is_ok() { return; @@ -294,7 +302,7 @@ fn can_be_retried(err: ErrorKind) -> bool { matches!(err, ErrorKind::TimedOut | ErrorKind::ConnectionRefused) } -fn handle_send_result( +async fn handle_send_result( result: &Result<(Duration, u64), String>, job_id: u32, address: &SocketAddress, @@ -321,9 +329,11 @@ fn handle_send_result( // endregion - pool.access_storage_blocking() + pool.access_storage() + .await .prover_dal() - .update_status(job_id, "in_gpu_proof"); + .update_status(job_id, "in_gpu_proof") + .await; } Err(err) => { @@ -333,7 +343,8 @@ fn handle_send_result( ); // mark prover instance in gpu_prover_queue dead - pool.access_storage_blocking() + pool.access_storage() + .await .gpu_prover_queue_dal() .update_prover_instance_status( address.clone(), @@ -341,17 +352,20 @@ fn handle_send_result( 0, region, zone, - ); + ) + .await; let prover_config = ProverConfigs::from_env().non_gpu; // mark the 
job as failed - pool.access_storage_blocking() + pool.access_storage() + .await .prover_dal() .save_proof_error( job_id, "prover instance unreachable".to_string(), prover_config.max_attempts, - ); + ) + .await; } } } diff --git a/core/bin/circuit_synthesizer/src/main.rs b/core/bin/circuit_synthesizer/src/main.rs index 0f168df2ffb2..4ae27a73aa49 100644 --- a/core/bin/circuit_synthesizer/src/main.rs +++ b/core/bin/circuit_synthesizer/src/main.rs @@ -1,12 +1,15 @@ -use futures::future; +use prometheus_exporter::run_prometheus_exporter; use structopt::StructOpt; -use tokio::{sync::oneshot, sync::watch, task::JoinHandle}; +use tokio::{sync::oneshot, sync::watch}; -use prometheus_exporter::run_prometheus_exporter; -use zksync_config::configs::{utils::Prometheus, CircuitSynthesizerConfig, ProverGroupConfig}; +use zksync_config::configs::{ + AlertsConfig, CircuitSynthesizerConfig, PrometheusConfig, ProverGroupConfig, +}; +use zksync_dal::connection::DbVariant; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; +use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::circuit_synthesizer::CircuitSynthesizer; @@ -20,35 +23,18 @@ struct Opt { number_of_iterations: Option, } -async fn wait_for_tasks(task_futures: Vec>) { - match future::select_all(task_futures).await.0 { - Ok(_) => { - vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); - } - Err(err) => { - vlog::info!("One of the tokio actors unexpectedly finished with error: {err:?}"); - } - } -} - #[tokio::main] async fn main() { + vlog::init(); let opt = Opt::from_args(); - let sentry_guard = vlog::init(); - match sentry_guard { - Some(_) => vlog::info!( - "Starting Sentry url: {}", - std::env::var("MISC_SENTRY_URL").unwrap(), - ), - None => vlog::info!("No sentry url configured"), - } let config: CircuitSynthesizerConfig = CircuitSynthesizerConfig::from_env(); - let pool = ConnectionPool::new(None, true); + let pool = ConnectionPool::new(None, DbVariant::Prover).await; let circuit_synthesizer = CircuitSynthesizer::new( config.clone(), ProverGroupConfig::from_env(), &ObjectStoreFactory::from_env(), + pool, ) .await .unwrap_or_else(|err| { @@ -68,18 +54,21 @@ async fn main() { .expect("Error setting Ctrl+C handler"); vlog::info!("Starting circuit synthesizer"); - let prometheus_config = Prometheus { + let prometheus_config = PrometheusConfig { listener_port: config.prometheus_listener_port, pushgateway_url: config.prometheus_pushgateway_url, push_interval_ms: config.prometheus_push_interval_ms, }; let tasks = vec![ - run_prometheus_exporter(prometheus_config, true), - tokio::spawn(circuit_synthesizer.run(pool, stop_receiver, opt.number_of_iterations)), + run_prometheus_exporter(prometheus_config.listener_port, None), + tokio::spawn(circuit_synthesizer.run(stop_receiver, opt.number_of_iterations)), ]; + let particular_crypto_alerts = Some(AlertsConfig::from_env().sporadic_crypto_errors_substrs); + let graceful_shutdown = None::>; + let tasks_allowed_to_finish = false; tokio::select! 
{ - _ = wait_for_tasks(tasks) => {}, + _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = stop_signal_receiver => { vlog::info!("Stop signal received, shutting down"); } diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index cd4b7f1ef926..b607a2753b11 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] description = "The zkEVM contract verifier" @@ -16,6 +16,7 @@ zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_contracts = { path = "../../lib/contracts", version = "1.0" } zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } diff --git a/core/bin/contract-verifier/src/error.rs b/core/bin/contract-verifier/src/error.rs index d38c82389ef5..c66756d1f121 100644 --- a/core/bin/contract-verifier/src/error.rs +++ b/core/bin/contract-verifier/src/error.rs @@ -8,14 +8,12 @@ pub enum ContractVerifierError { IncorrectConstructorArguments, #[error("Compilation takes too much time")] CompilationTimeout, - #[error("ZkSolc error: {0}")] - ZkSolcError(String), + #[error("{0} error: {1}")] + CompilerError(String, String), #[error("Compilation error")] CompilationError(serde_json::Value), - #[error("Unknown zksolc version: {0}")] - UnknownZkSolcVersion(String), - #[error("Unknown solc version: {0}")] - UnknownSolcVersion(String), + #[error("Unknown {0} version: {1}")] + UnknownCompilerVersion(String, String), #[error("Contract with {0} name is missing in sources")] MissingContract(String), #[error("There is no {0} source file")] diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 573eb79c9622..8ebd2cdeb2c9 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -1,38 +1,23 @@ use std::cell::RefCell; -use zksync_config::{ - configs::utils::Prometheus as PrometheusConfig, ApiConfig, ContractVerifierConfig, -}; +use zksync_config::{configs::PrometheusConfig, ApiConfig, ContractVerifierConfig}; use zksync_dal::ConnectionPool; use zksync_queued_job_processor::JobProcessor; +use zksync_utils::wait_for_tasks::wait_for_tasks; -use futures::{channel::mpsc, executor::block_on, future, SinkExt, StreamExt}; +use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; use tokio::sync::watch; -use tokio::task::JoinHandle; use crate::verifier::ContractVerifier; pub mod error; pub mod verifier; pub mod zksolc_utils; - -pub async fn wait_for_tasks(task_futures: Vec>) { - match future::select_all(task_futures).await.0 { - Ok(_) => { - vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); - } - Err(error) => { - vlog::info!( - "One of the tokio actors unexpectedly finished with error: {:?}", - error - ); - } - } -} +pub mod zkvyper_utils; async fn update_compiler_versions(connection_pool: &ConnectionPool) { - let mut storage = connection_pool.access_storage_blocking(); - let mut transaction = 
storage.start_transaction().await;
 
 let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into());
 
@@ -41,8 +26,11 @@ async fn update_compiler_versions(connection_pool: &ConnectionPool) {
 .unwrap()
 .filter_map(|file| {
 let file = file.unwrap();
- if file.file_type().unwrap().is_dir() {
- Some(file.file_name().into_string().unwrap())
+ let Ok(file_type) = file.file_type() else {
+ return None;
+ };
+ if file_type.is_dir() {
+ file.file_name().into_string().ok()
 } else {
 None
 }
@@ -52,6 +40,7 @@
 .explorer()
 .contract_verification_dal()
 .set_zksolc_versions(zksolc_versions)
+ .await
 .unwrap();
 
 let solc_path = format!("{}/etc/solc-bin/", zksync_home);
@@ -59,8 +48,11 @@
 .unwrap()
 .filter_map(|file| {
 let file = file.unwrap();
- if file.file_type().unwrap().is_dir() {
- Some(file.file_name().into_string().unwrap())
+ let Ok(file_type) = file.file_type() else {
+ return None;
+ };
+ if file_type.is_dir() {
+ file.file_name().into_string().ok()
 } else {
 None
 }
@@ -70,12 +62,59 @@
 .explorer()
 .contract_verification_dal()
 .set_solc_versions(solc_versions)
+ .await
+ .unwrap();
+
+ let zkvyper_path = format!("{}/etc/zkvyper-bin/", zksync_home);
+ let zkvyper_versions: Vec<String> = std::fs::read_dir(zkvyper_path)
+ .unwrap()
+ .filter_map(|file| {
+ let file = file.unwrap();
+ let Ok(file_type) = file.file_type() else {
+ return None;
+ };
+ if file_type.is_dir() {
+ file.file_name().into_string().ok()
+ } else {
+ None
+ }
+ })
+ .collect();
+ transaction
+ .explorer()
+ .contract_verification_dal()
+ .set_zkvyper_versions(zkvyper_versions)
+ .await
 .unwrap();
 
- transaction.commit_blocking();
+ let vyper_path = format!("{}/etc/vyper-bin/", zksync_home);
+ let vyper_versions: Vec<String> = std::fs::read_dir(vyper_path)
+ .unwrap()
+ .filter_map(|file| {
+ let file = file.unwrap();
+ let Ok(file_type) = file.file_type() else {
+ return None;
+ };
+ if file_type.is_dir() {
+ file.file_name().into_string().ok()
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ transaction
+ .explorer()
+ .contract_verification_dal()
+ .set_vyper_versions(vyper_versions)
+ .await
+ .unwrap();
+
+ transaction.commit().await;
 }
 
 use structopt::StructOpt;
+use zksync_dal::connection::DbVariant;
 
 #[derive(StructOpt)]
 #[structopt(name = "zkSync contract code verifier", author = "Matter Labs")]
@@ -94,9 +133,10 @@ async fn main() {
 listener_port: verifier_config.prometheus_port,
 ..ApiConfig::from_env().prometheus
 };
- let pool = ConnectionPool::new(Some(1), true);
+ let pool = ConnectionPool::new(Some(1), DbVariant::Master).await;
 
- let sentry_guard = vlog::init();
+ vlog::init();
+ let sentry_guard = vlog::init_sentry();
 match sentry_guard {
 Some(_) => vlog::info!(
 "Starting Sentry url: {}",
@@ -118,14 +158,20 @@ async fn main() {
 
 update_compiler_versions(&pool).await;
 
- let contract_verifier = ContractVerifier::new(verifier_config);
+ let contract_verifier = ContractVerifier::new(verifier_config, pool);
 
 let tasks = vec![
+ // `run` no longer takes a connection pool: the verifier owns one (passed to
+ // `ContractVerifier::new` above), since the `JobProcessor` methods now access storage through `self`.
+ tokio::spawn(contract_verifier.run(stop_receiver, opt.jobs_number)), + prometheus_exporter::run_prometheus_exporter(prometheus_config.listener_port, None), ]; + + let particular_crypto_alerts = None; + let graceful_shutdown = None::>; + let tasks_allowed_to_finish = false; tokio::select! { - _ = async { wait_for_tasks(tasks).await } => {}, - _ = async { stop_signal_receiver.next().await } => { + _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, + _ = stop_signal_receiver.next() => { vlog::info!("Stop signal received, shutting down"); }, }; diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index eb69d5052481..e13ca05c5425 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -14,16 +14,17 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ explorer_api::{ - CompilationArtifacts, DeployContractCalldata, SourceCodeData, VerificationInfo, - VerificationRequest, + CompilationArtifacts, CompilerType, DeployContractCalldata, SourceCodeData, + VerificationInfo, VerificationRequest, }, Address, }; use crate::error::ContractVerifierError; use crate::zksolc_utils::{ - CompilerInput, CompilerOutput, Optimizer, Settings, Source, StandardJson, ZkSolc, + Optimizer, Settings, Source, StandardJson, ZkSolc, ZkSolcInput, ZkSolcOutput, }; +use crate::zkvyper_utils::{ZkVyper, ZkVyperInput}; lazy_static! { static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); @@ -38,11 +39,15 @@ enum ConstructorArgs { #[derive(Debug)] pub struct ContractVerifier { config: ContractVerifierConfig, + connection_pool: ConnectionPool, } impl ContractVerifier { - pub fn new(config: ContractVerifierConfig) -> Self { - Self { config } + pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool) -> Self { + Self { + config, + connection_pool, + } } async fn verify( @@ -56,7 +61,7 @@ impl ContractVerifier { let (deployed_bytecode, creation_tx_calldata) = storage .explorer() .contract_verification_dal() - .get_contract_info_for_verification(request.req.contract_address) + .get_contract_info_for_verification(request.req.contract_address).await .unwrap() .ok_or_else(|| { vlog::warn!("Contract is missing in DB for already accepted verification request. 
Contract address: {:#?}", request.req.contract_address); @@ -89,7 +94,7 @@ impl ContractVerifier { }) } - async fn compile( + async fn compile_zksolc( request: VerificationRequest, config: ContractVerifierConfig, ) -> Result { @@ -104,42 +109,41 @@ impl ContractVerifier { request.req.contract_name.clone(), ) }; - let input = Self::build_compiler_input(request.clone(), file_name.clone())?; + let input = Self::build_zksolc_input(request.clone(), file_name.clone())?; let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let zksolc_path = Path::new(&zksync_home) .join("etc") .join("zksolc-bin") - .join(request.req.compiler_zksolc_version.as_str()) + .join(request.req.compiler_versions.zk_compiler_version()) .join("zksolc"); if !zksolc_path.exists() { - return Err(ContractVerifierError::UnknownZkSolcVersion( - request.req.compiler_zksolc_version, + return Err(ContractVerifierError::UnknownCompilerVersion( + "zksolc".to_string(), + request.req.compiler_versions.zk_compiler_version(), )); } let solc_path = Path::new(&zksync_home) .join("etc") .join("solc-bin") - .join(request.req.compiler_solc_version.as_str()) + .join(request.req.compiler_versions.compiler_version()) .join("solc"); if !solc_path.exists() { - return Err(ContractVerifierError::UnknownSolcVersion( - request.req.compiler_solc_version, + return Err(ContractVerifierError::UnknownCompilerVersion( + "solc".to_string(), + request.req.compiler_versions.compiler_version(), )); } let zksolc = ZkSolc::new(zksolc_path, solc_path); - let output = time::timeout( - config.compilation_timeout(), - zksolc.async_compile(&input, request.req.is_system), - ) - .await - .map_err(|_| ContractVerifierError::CompilationTimeout)??; + let output = time::timeout(config.compilation_timeout(), zksolc.async_compile(input)) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)??; match output { - CompilerOutput::StandardJson(output) => { + ZkSolcOutput::StandardJson(output) => { if let Some(errors) = output.get("errors") { let errors = errors.as_array().unwrap().clone(); if errors @@ -179,7 +183,7 @@ impl ContractVerifier { Ok(CompilationArtifacts { bytecode, abi }) } - CompilerOutput::YulSingleFile(output) => { + ZkSolcOutput::YulSingleFile(output) => { let re = Regex::new(r"Contract `.*` bytecode: 0x([\da-f]+)").unwrap(); let cap = re.captures(&output).unwrap(); let bytecode_str = cap.get(1).unwrap().as_str(); @@ -192,10 +196,86 @@ impl ContractVerifier { } } - fn build_compiler_input( + async fn compile_zkvyper( + request: VerificationRequest, + config: ContractVerifierConfig, + ) -> Result { + // Users may provide either just contract name or + // source file name and contract name joined with ":". 
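+ // E.g. a bare "Greeter" is used as-is, while a hypothetical "contracts/Greeter.vy:Greeter"
+ // resolves to the contract name "Greeter" via the `rsplit_once(':')` call below.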
+ let contract_name = + if let Some((_file_name, contract_name)) = request.req.contract_name.rsplit_once(':') { + contract_name.to_string() + } else { + request.req.contract_name.clone() + }; + let input = Self::build_zkvyper_input(request.clone())?; + + let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); + let zkvyper_path = Path::new(&zksync_home) + .join("etc") + .join("zkvyper-bin") + .join(request.req.compiler_versions.zk_compiler_version()) + .join("zkvyper"); + if !zkvyper_path.exists() { + return Err(ContractVerifierError::UnknownCompilerVersion( + "zkvyper".to_string(), + request.req.compiler_versions.zk_compiler_version(), + )); + } + + let vyper_path = Path::new(&zksync_home) + .join("etc") + .join("vyper-bin") + .join(request.req.compiler_versions.compiler_version()) + .join("vyper"); + if !vyper_path.exists() { + return Err(ContractVerifierError::UnknownCompilerVersion( + "vyper".to_string(), + request.req.compiler_versions.compiler_version(), + )); + } + + let zkvyper = ZkVyper::new(zkvyper_path, vyper_path); + + let output = time::timeout(config.compilation_timeout(), zkvyper.async_compile(input)) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)??; + + let object = output + .as_object() + .cloned() + .ok_or(ContractVerifierError::InternalError)?; + for (path, artifact) in object { + let path = Path::new(&path); + if path.file_name().unwrap().to_str().unwrap() == contract_name.as_str() { + let bytecode_str = artifact["bytecode"] + .as_str() + .ok_or(ContractVerifierError::InternalError)?; + let bytecode = hex::decode(bytecode_str).unwrap(); + return Ok(CompilationArtifacts { + abi: artifact["abi"].clone(), + bytecode, + }); + } + } + + Err(ContractVerifierError::MissingContract(contract_name)) + } + + async fn compile( + request: VerificationRequest, + config: ContractVerifierConfig, + ) -> Result { + match request.req.source_code_data.compiler_type() { + CompilerType::Solc => Self::compile_zksolc(request, config).await, + CompilerType::Vyper => Self::compile_zkvyper(request, config).await, + } + } + + fn build_zksolc_input( request: VerificationRequest, file_name: String, - ) -> Result { + ) -> Result { let default_output_selection = serde_json::json!( { "*": { @@ -222,7 +302,7 @@ impl ContractVerifier { metadata: None, }; - Ok(CompilerInput::StandardJson(StandardJson { + Ok(ZkSolcInput::StandardJson(StandardJson { language: "Solidity".to_string(), sources, settings, @@ -234,14 +314,28 @@ impl ContractVerifier { .map_err(|_| ContractVerifierError::FailedToDeserializeInput)?; // Set default output selection even if it is different in request. 
compiler_input.settings.output_selection = Some(default_output_selection);
- Ok(CompilerInput::StandardJson(compiler_input))
+ Ok(ZkSolcInput::StandardJson(compiler_input))
 }
 SourceCodeData::YulSingleFile(source_code) => {
- Ok(CompilerInput::YulSingleFile(source_code))
+ Ok(ZkSolcInput::YulSingleFile(source_code))
 }
+ _ => panic!("Unexpected SourceCode variant"),
 }
 }
 
+ fn build_zkvyper_input(
+ request: VerificationRequest,
+ ) -> Result<ZkVyperInput, ContractVerifierError> {
+ let sources = match request.req.source_code_data {
+ SourceCodeData::VyperMultiFile(s) => s,
+ _ => panic!("Unexpected SourceCode variant"),
+ };
+ Ok(ZkVyperInput {
+ sources,
+ optimizer_mode: request.req.optimizer_mode,
+ })
+ }
+
 fn decode_constructor_arguments_from_calldata(
 calldata: DeployContractCalldata,
 contract_address_to_verify: Address,
@@ -318,7 +412,7 @@
 }
 }
 
- fn process_result(
+ async fn process_result(
 storage: &mut StorageProcessor<'_>,
 request_id: usize,
 verification_result: Result<VerificationInfo, ContractVerifierError>,
@@ -329,6 +423,7 @@
 .explorer()
 .contract_verification_dal()
 .save_verification_info(info)
+ .await
 .unwrap();
 vlog::info!("Successfully processed request with id = {}", request_id);
 }
@@ -344,6 +439,7 @@
 .explorer()
 .contract_verification_dal()
 .save_verification_error(request_id, error_message, compilation_errors, None)
+ .await
 .unwrap();
 vlog::info!("Request with id = {} failed", request_id);
 }
@@ -360,11 +456,8 @@
 const SERVICE_NAME: &'static str = "contract_verifier";
 const BACKOFF_MULTIPLIER: u64 = 1;
 
- async fn get_next_job(
- &self,
- connection_pool: ConnectionPool,
- ) -> Option<(Self::JobId, Self::Job)> {
- let mut connection = connection_pool.access_storage_blocking();
+ async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> {
+ let mut connection = self.connection_pool.access_storage().await;
 
 // Time overhead for all operations except for compilation.
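// (Presumably this pads the window used when claiming a queued request below: e.g. with a
// 30-second compilation timeout, a request gets roughly a 40-second processing allowance
// before it is considered stuck. The exact DAL semantics are not shown in this diff.)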
const TIME_OVERHEAD: Duration = Duration::from_secs(10); @@ -376,19 +469,14 @@ impl JobProcessor for ContractVerifier { .explorer() .contract_verification_dal() .get_next_queued_verification_request(self.config.compilation_timeout() + TIME_OVERHEAD) + .await .unwrap(); job.map(|job| (job.id, job)) } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: usize, - _started_at: Instant, - error: String, - ) { - let mut connection = connection_pool.access_storage_blocking(); + async fn save_failure(&self, job_id: usize, _started_at: Instant, error: String) { + let mut connection = self.connection_pool.access_storage().await; connection .explorer() @@ -399,25 +487,26 @@ impl JobProcessor for ContractVerifier { serde_json::Value::Array(Vec::new()), Some(error), ) + .await .unwrap(); } #[allow(clippy::async_yields_async)] async fn process_job( &self, - connection_pool: ConnectionPool, job: VerificationRequest, started_at: Instant, ) -> tokio::task::JoinHandle<()> { + let connection_pool = self.connection_pool.clone(); tokio::task::spawn(async move { vlog::info!("Started to process request with id = {}", job.id); let config: ContractVerifierConfig = ContractVerifierConfig::from_env(); - let mut connection = connection_pool.access_storage_blocking(); + let mut connection = connection_pool.access_storage().await; let job_id = job.id; let verification_result = Self::verify(&mut connection, job, config).await; - Self::process_result(&mut connection, job_id, verification_result); + Self::process_result(&mut connection, job_id, verification_result).await; metrics::histogram!( "api.contract_verifier.request_processing_time", @@ -426,13 +515,7 @@ impl JobProcessor for ContractVerifier { }) } - async fn save_result( - &self, - _: ConnectionPool, - _: Self::JobId, - _: Instant, - _: Self::JobArtifacts, - ) { + async fn save_result(&self, _: Self::JobId, _: Instant, _: Self::JobArtifacts) { // Do nothing } } diff --git a/core/bin/contract-verifier/src/zksolc_utils.rs b/core/bin/contract-verifier/src/zksolc_utils.rs index 0602917f0266..4fba999453c4 100644 --- a/core/bin/contract-verifier/src/zksolc_utils.rs +++ b/core/bin/contract-verifier/src/zksolc_utils.rs @@ -7,13 +7,13 @@ use std::process::Stdio; use crate::error::ContractVerifierError; #[derive(Debug)] -pub enum CompilerInput { +pub enum ZkSolcInput { StandardJson(StandardJson), YulSingleFile(String), } #[derive(Debug)] -pub enum CompilerOutput { +pub enum ZkSolcOutput { StandardJson(serde_json::Value), YulSingleFile(String), } @@ -115,13 +115,14 @@ impl ZkSolc { pub async fn async_compile( &self, - input: &CompilerInput, - is_system_flag: bool, - ) -> Result { + input: ZkSolcInput, + ) -> Result { use tokio::io::AsyncWriteExt; let mut command = tokio::process::Command::new(&self.zksolc_path); - if is_system_flag { - command.arg("--system-mode"); + if let ZkSolcInput::StandardJson(input) = &input { + if input.settings.is_system { + command.arg("--system-mode"); + } } command .arg("--solc") @@ -129,14 +130,14 @@ impl ZkSolc { .stdout(Stdio::piped()) .stderr(Stdio::piped()); match input { - CompilerInput::StandardJson(input) => { + ZkSolcInput::StandardJson(input) => { let mut child = command .arg("--standard-json") .stdin(Stdio::piped()) .spawn() .map_err(|_err| ContractVerifierError::InternalError)?; let stdin = child.stdin.as_mut().unwrap(); - let content = serde_json::to_vec(input).unwrap(); + let content = serde_json::to_vec(&input).unwrap(); stdin .write_all(&content) .await @@ -151,17 +152,18 @@ impl ZkSolc { .await 
.map_err(|_err| ContractVerifierError::InternalError)?;
 if output.status.success() {
- Ok(CompilerOutput::StandardJson(
+ Ok(ZkSolcOutput::StandardJson(
 serde_json::from_slice(&output.stdout)
 .expect("Compiler output must be valid JSON"),
 ))
 } else {
- Err(ContractVerifierError::ZkSolcError(
+ Err(ContractVerifierError::CompilerError(
+ "zksolc".to_string(),
 String::from_utf8_lossy(&output.stderr).to_string(),
 ))
 }
 }
- CompilerInput::YulSingleFile(content) => {
+ ZkSolcInput::YulSingleFile(content) => {
 let mut file = tempfile::Builder::new()
 .prefix("input")
 .suffix(".yul")
@@ -183,11 +185,12 @@
 .await
 .map_err(|_err| ContractVerifierError::InternalError)?;
 if output.status.success() {
- Ok(CompilerOutput::YulSingleFile(
+ Ok(ZkSolcOutput::YulSingleFile(
 String::from_utf8(output.stdout).expect("Couldn't parse string"),
 ))
 } else {
- Err(ContractVerifierError::ZkSolcError(
+ Err(ContractVerifierError::CompilerError(
+ "zksolc".to_string(),
 String::from_utf8_lossy(&output.stderr).to_string(),
 ))
 }
diff --git a/core/bin/contract-verifier/src/zkvyper_utils.rs b/core/bin/contract-verifier/src/zkvyper_utils.rs
new file mode 100644
index 000000000000..a0831f44712b
--- /dev/null
+++ b/core/bin/contract-verifier/src/zkvyper_utils.rs
@@ -0,0 +1,72 @@
+use std::collections::HashMap;
+use std::io::Write;
+use std::path::PathBuf;
+use std::process::Stdio;
+
+use crate::error::ContractVerifierError;
+
+#[derive(Debug)]
+pub struct ZkVyperInput {
+ pub sources: HashMap<String, String>,
+ pub optimizer_mode: Option<String>,
+}
+
+pub struct ZkVyper {
+ zkvyper_path: PathBuf,
+ vyper_path: PathBuf,
+}
+
+impl ZkVyper {
+ pub fn new(zkvyper_path: impl Into<PathBuf>, vyper_path: impl Into<PathBuf>) -> Self {
+ ZkVyper {
+ zkvyper_path: zkvyper_path.into(),
+ vyper_path: vyper_path.into(),
+ }
+ }
+
+ pub async fn async_compile(
+ &self,
+ input: ZkVyperInput,
+ ) -> Result<serde_json::Value, ContractVerifierError> {
+ let mut command = tokio::process::Command::new(&self.zkvyper_path);
+ if let Some(o) = input.optimizer_mode.as_ref() {
+ command.arg("-O").arg(o);
+ }
+ command
+ .arg("--vyper")
+ .arg(self.vyper_path.to_str().unwrap())
+ .arg("-f")
+ .arg("combined_json")
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped());
+
+ let mut files = vec![];
+ for (name, content) in input.sources {
+ let mut file = tempfile::Builder::new()
+ .prefix(&name)
+ .suffix("")
+ .rand_bytes(0)
+ .tempfile()
+ .map_err(|_err| ContractVerifierError::InternalError)?;
+ file.write_all(content.as_bytes())
+ .map_err(|_err| ContractVerifierError::InternalError)?;
+ command.arg(file.path().to_str().unwrap());
+ files.push(file);
+ }
+ let child = command
+ .spawn()
+ .map_err(|_err| ContractVerifierError::InternalError)?;
+ let output = child
+ .wait_with_output()
+ .await
+ .map_err(|_err| ContractVerifierError::InternalError)?;
+ if output.status.success() {
+ Ok(serde_json::from_slice(&output.stdout).expect("Compiler output must be valid JSON"))
+ } else {
+ Err(ContractVerifierError::CompilerError(
+ "zkvyper".to_string(),
+ String::from_utf8_lossy(&output.stderr).to_string(),
+ ))
+ }
+ }
+}
diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml
index f96d4d77b3a9..c8a0516bd1e3 100644
--- a/core/bin/external_node/Cargo.toml
+++ b/core/bin/external_node/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "zksync_external_node"
 version = "1.0.0"
-edition = "2018"
+edition = "2021"
 authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
 homepage = "https://zksync.io/"
 repository = "https://github.com/matter-labs/zksync-era"
@@ -15,11 +15,20 @@
 zksync_core = { path = "../zksync_core", version = 
"1.0" } zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_storage = { path = "../../lib/storage", version = "1.0" } -zksync_eth_client = { path = "../../lib/eth_client", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } +zksync_state = { path = "../../lib/state", version = "1.0" } zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" } +zksync_contracts = { path = "../../lib/contracts", version = "1.0" } + prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } +zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0" } +zksync_types = { path = "../../lib/types", version = "1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } anyhow = "1.0" tokio = { version = "1", features = ["time"] } +futures = "0.3" +serde = { version = "1.0", features = ["derive"] } +envy = "0.4" +url = "2.4" diff --git a/core/bin/external_node/src/config.rs b/core/bin/external_node/src/config.rs new file mode 100644 index 000000000000..9ec79384da57 --- /dev/null +++ b/core/bin/external_node/src/config.rs @@ -0,0 +1,375 @@ +use anyhow::Context; +use serde::Deserialize; +use std::{env, time::Duration}; +use url::Url; + +use zksync_basic_types::{Address, L1ChainId, L2ChainId, H256}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_core::api_server::{tx_sender::TxSenderConfig, web3::state::InternalApiConfig}; +use zksync_types::api::BridgeAddresses; + +use zksync_web3_decl::{ + jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, +}; + +/// This part of the external node config is fetched directly from the main node. +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct RemoteENConfig { + pub diamond_proxy_addr: Address, + pub l1_erc20_bridge_proxy_addr: Address, + pub l2_erc20_bridge_addr: Address, + pub l1_weth_bridge_proxy_addr: Option
<Address>,
+ pub l2_weth_bridge_addr: Option<Address>
,
+ pub l2_testnet_paymaster_addr: Option<Address>
,
+ pub l2_chain_id: L2ChainId,
+ pub l1_chain_id: L1ChainId,
+}
+
+impl RemoteENConfig {
+ pub async fn fetch(client: &HttpClient) -> anyhow::Result<Self> {
+ let bridges = client
+ .get_bridge_contracts()
+ .await
+ .context("Failed to fetch bridge contracts")?;
+ let l2_testnet_paymaster_addr = client
+ .get_testnet_paymaster()
+ .await
+ .context("Failed to fetch paymaster")?;
+ let diamond_proxy_addr = client
+ .get_main_contract()
+ .await
+ .context("Failed to fetch L1 contract address")?;
+ let l2_chain_id = L2ChainId(
+ client
+ .chain_id()
+ .await
+ .context("Failed to fetch L2 chain ID")?
+ .as_u64() as u16,
+ );
+ let l1_chain_id = L1ChainId(
+ client
+ .l1_chain_id()
+ .await
+ .context("Failed to fetch L1 chain ID")?
+ .as_u64(),
+ );
+
+ Ok(Self {
+ diamond_proxy_addr,
+ l2_testnet_paymaster_addr,
+ l1_erc20_bridge_proxy_addr: bridges.l1_erc20_default_bridge,
+ l2_erc20_bridge_addr: bridges.l2_erc20_default_bridge,
+ l1_weth_bridge_proxy_addr: bridges.l1_weth_bridge,
+ l2_weth_bridge_addr: bridges.l2_weth_bridge,
+ l2_chain_id,
+ l1_chain_id,
+ })
+ }
+}
+
+/// This part of the external node config is completely optional to provide.
+/// It can tweak limits of the API, delay intervals of certain components, etc.
+/// If any of the fields are not provided, the default values will be used.
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct OptionalENConfig {
+ /// Max possible limit of filters to be in the API state at once.
+ filters_limit: Option<usize>,
+ /// Max possible limit of subscriptions to be in the API state at once.
+ subscriptions_limit: Option<usize>,
+ /// Interval between polling db for pubsub (in ms).
+ pubsub_polling_interval: Option<u64>,
+ /// Max possible limit of entities to be requested via API at once.
+ req_entities_limit: Option<usize>,
+ /// Max possible size of an ABI encoded tx (in bytes).
+ max_tx_size: Option<usize>,
+ /// The factor by which to scale the gasLimit.
+ estimate_gas_scale_factor: Option<f64>,
+ /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate.
+ estimate_gas_acceptable_overestimation: Option<u32>,
+ /// The multiplier to use when suggesting gas price. Should be higher than one,
+ /// otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in a block.
+ gas_price_scale_factor: Option<f64>,
+ /// Tx nonce: how far ahead from the committed nonce can it be.
+ max_nonce_ahead: Option<u32>,
+ metadata_calculator_delay: Option<u64>,
+ /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics.
+ /// This is a temporary solution to mitigate API requests resulting in thousands of DB queries.
+ pub vm_execution_cache_misses_limit: Option<usize>,
+ /// Inbound transaction limit used for throttling.
+ pub transactions_per_sec_limit: Option<u32>,
+ /// Port on which the Prometheus exporter server is listening.
+ pub prometheus_port: Option<u16>,
+ /// Throttle interval for the tree in milliseconds. This interval will be
+ /// applied after each time the tree makes progress.
+ merkle_tree_throttle: Option<u64>,
+ /// Maximum number of blocks to be processed by the Merkle tree at a time.
+ max_blocks_per_tree_batch: Option<usize>,
+ /// Max number of VM instances to be concurrently spawned by the API server.
+ /// This option can be tweaked down if the API server is running out of memory.
+ vm_concurrency_limit: Option<usize>,
+ /// Smart contract source code cache size for the API server.
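+ /// Falls back to 128 MB (see the getter below) if unset; with the `envy::prefixed("EN_")`
+ /// loading used in `collect()`, a hypothetical override would be `EN_FACTORY_DEPS_CACHE_SIZE_MB=256`.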
+ factory_deps_cache_size_mb: Option<usize>,
+}
+
+impl OptionalENConfig {
+ pub fn polling_interval(&self) -> Duration {
+ Duration::from_millis(self.pubsub_polling_interval.unwrap_or(200))
+ }
+
+ pub fn req_entities_limit(&self) -> usize {
+ self.req_entities_limit.unwrap_or(1024)
+ }
+
+ pub fn filters_limit(&self) -> usize {
+ self.filters_limit.unwrap_or(10000)
+ }
+
+ pub fn subscriptions_limit(&self) -> usize {
+ self.subscriptions_limit.unwrap_or(10000)
+ }
+
+ pub fn max_tx_size(&self) -> usize {
+ self.max_tx_size.unwrap_or(1000000)
+ }
+
+ pub fn estimate_gas_scale_factor(&self) -> f64 {
+ self.estimate_gas_scale_factor.unwrap_or(1.2)
+ }
+
+ pub fn estimate_gas_acceptable_overestimation(&self) -> u32 {
+ self.estimate_gas_acceptable_overestimation.unwrap_or(1000)
+ }
+
+ pub fn gas_price_scale_factor(&self) -> f64 {
+ self.gas_price_scale_factor.unwrap_or(1.2)
+ }
+
+ pub fn max_nonce_ahead(&self) -> u32 {
+ self.max_nonce_ahead.unwrap_or(50)
+ }
+
+ pub fn metadata_calculator_delay(&self) -> Duration {
+ Duration::from_millis(self.metadata_calculator_delay.unwrap_or(100))
+ }
+
+ pub fn max_blocks_per_tree_batch(&self) -> usize {
+ self.max_blocks_per_tree_batch.unwrap_or(100)
+ }
+
+ pub fn merkle_tree_throttle(&self) -> Duration {
+ Duration::from_millis(self.merkle_tree_throttle.unwrap_or(0))
+ }
+
+ pub fn vm_concurrency_limit(&self) -> Option<usize> {
+ self.vm_concurrency_limit
+ }
+
+ pub fn factory_deps_cache_size_mb(&self) -> usize {
+ // 128MB is the default smart contract code cache size.
+ self.factory_deps_cache_size_mb.unwrap_or(128)
+ }
+}
+
+/// This part of the external node config is required for its operation.
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct RequiredENConfig {
+ /// Default AA hash used at genesis.
+ pub default_aa_hash: H256,
+ /// Bootloader hash used at genesis.
+ pub bootloader_hash: H256,
+
+ /// Port on which the HTTP RPC server is listening.
+ pub http_port: u16,
+ /// Port on which the WebSocket RPC server is listening.
+ pub ws_port: u16,
+ /// Port on which the healthcheck REST server is listening.
+ pub healthcheck_port: u16,
+ /// Number of threads per API server.
+ pub threads_per_server: usize,
+ /// Address of the Ethereum node API.
+ /// Intentionally private: use getter method as it manages the missing port.
+ eth_client_url: String,
+ /// Main node URL - used by external node to proxy transactions to, query state from, etc.
+ /// Intentionally private: use getter method as it manages the missing port.
+ main_node_url: String,
+ /// Path to the database data directory that serves state cache.
+ pub state_cache_path: String,
+ /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*new* implementation).
+ pub merkle_tree_path: String,
+
+ pub max_allowed_l2_tx_gas_limit: u32,
+ pub fee_account_addr: Address,
+ pub fair_l2_gas_price: u64,
+ pub validation_computational_gas_limit: u32,
+}
+
+impl RequiredENConfig {
+ pub fn main_node_url(&self) -> anyhow::Result<String> {
+ Self::get_url(&self.main_node_url).context("Could not parse main node URL")
+ }
+
+ pub fn eth_client_url(&self) -> anyhow::Result<String> {
+ Self::get_url(&self.eth_client_url).context("Could not parse L1 client URL")
+ }
+
+ fn get_url(url_str: &str) -> anyhow::Result<String> {
+ let url = Url::parse(url_str).context("URL cannot be parsed")?;
+ format_url_with_port(&url)
+ }
+}
+
+/// External Node Config contains all the configuration required for the EN operation.
+/// It is split into three parts: required, optional and remote for easier navigation.
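+///
+/// A minimal usage sketch (an illustration, assuming the `EN_`-prefixed env
+/// variables described above are set):
+///
+/// ```ignore
+/// let config = ExternalNodeConfig::collect().await?;
+/// let main_node_url = config.required.main_node_url()?;
+/// let filters_limit = config.optional.filters_limit();
+/// ```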
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct ExternalNodeConfig {
+ pub required: RequiredENConfig,
+ pub optional: OptionalENConfig,
+ pub remote: RemoteENConfig,
+}
+
+impl ExternalNodeConfig {
+ pub fn base_system_contracts_hashes(&self) -> BaseSystemContractsHashes {
+ BaseSystemContractsHashes {
+ bootloader: self.required.bootloader_hash,
+ default_aa: self.required.default_aa_hash,
+ }
+ }
+
+ /// Loads config from the environment variables and
+ /// fetches contracts addresses from the main node.
+ pub async fn collect() -> anyhow::Result<Self> {
+ let required = envy::prefixed("EN_")
+ .from_env::<RequiredENConfig>()
+ .context("could not load external node config")?;
+
+ let optional = envy::prefixed("EN_")
+ .from_env::<OptionalENConfig>()
+ .context("could not load external node config")?;
+
+ let client = HttpClientBuilder::default()
+ .build(required.main_node_url()?)
+ .expect("Unable to build HTTP client for main node");
+ let remote = RemoteENConfig::fetch(&client)
+ .await
+ .context("Unable to fetch required config values from the main node")?;
+
+ // We can query them from main node, but it's better to set them explicitly
+ // as well to avoid connecting to wrong envs unintentionally.
+ let eth_chain_id = HttpClientBuilder::default()
+ .build(required.eth_client_url()?)
+ .expect("Unable to build HTTP client for L1 client")
+ .chain_id()
+ .await
+ .context("Unable to check L1 chain ID through the configured L1 client")?;
+
+ let l2_chain_id: u16 = env_var("EN_L2_CHAIN_ID");
+ let l1_chain_id: u64 = env_var("EN_L1_CHAIN_ID");
+ if l2_chain_id != remote.l2_chain_id.0 {
+ anyhow::bail!(
+ "Configured L2 chain id doesn't match the one from main node.
+ Make sure your configuration is correct and you are connected to the right main node.
+ Main node L2 chain id: {}. Local config value: {}",
+ remote.l2_chain_id.0, l2_chain_id
+ );
+ }
+ if l1_chain_id != remote.l1_chain_id.0 {
+ anyhow::bail!(
+ "Configured L1 chain id doesn't match the one from main node.
+ Make sure your configuration is correct and you are connected to the right main node.
+ Main node L1 chain id: {}. Local config value: {}",
+ remote.l1_chain_id.0, l1_chain_id
+ );
+ }
+ if l1_chain_id != eth_chain_id.as_u64() {
+ anyhow::bail!(
+ "Configured L1 chain id doesn't match the one from eth node.
+ Make sure your configuration is correct and you are connected to the right eth node.
+ Eth node chain id: {}. 
Local config value: {}",
+ eth_chain_id,
+ l1_chain_id
+ );
+ }
+
+ Ok(Self {
+ remote,
+ required,
+ optional,
+ })
+ }
+}
+
+fn env_var<T>(name: &str) -> T
+where
+ T: std::str::FromStr,
+ T::Err: std::fmt::Debug,
+{
+ env::var(name)
+ .unwrap_or_else(|_| panic!("{} env variable is not set", name))
+ .parse()
+ .unwrap_or_else(|_| panic!("unable to parse {} env variable", name))
+}
+
+impl From<ExternalNodeConfig> for InternalApiConfig {
+ fn from(config: ExternalNodeConfig) -> Self {
+ Self {
+ l1_chain_id: config.remote.l1_chain_id,
+ l2_chain_id: config.remote.l2_chain_id,
+ max_tx_size: config.optional.max_tx_size(),
+ estimate_gas_scale_factor: config.optional.estimate_gas_scale_factor(),
+ estimate_gas_acceptable_overestimation: config
+ .optional
+ .estimate_gas_acceptable_overestimation(),
+ bridge_addresses: BridgeAddresses {
+ l1_erc20_default_bridge: config.remote.l1_erc20_bridge_proxy_addr,
+ l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr,
+ l1_weth_bridge: config.remote.l1_weth_bridge_proxy_addr,
+ l2_weth_bridge: config.remote.l2_weth_bridge_addr,
+ },
+ diamond_proxy_addr: config.remote.diamond_proxy_addr,
+ l2_testnet_paymaster_addr: config.remote.l2_testnet_paymaster_addr,
+ req_entities_limit: config.optional.req_entities_limit(),
+ }
+ }
+}
+
+impl From<ExternalNodeConfig> for TxSenderConfig {
+ fn from(config: ExternalNodeConfig) -> Self {
+ Self {
+ fee_account_addr: config.required.fee_account_addr,
+ gas_price_scale_factor: config.optional.gas_price_scale_factor(),
+ max_nonce_ahead: config.optional.max_nonce_ahead(),
+ max_allowed_l2_tx_gas_limit: config.required.max_allowed_l2_tx_gas_limit,
+ fair_l2_gas_price: config.required.fair_l2_gas_price,
+ vm_execution_cache_misses_limit: config.optional.vm_execution_cache_misses_limit,
+ validation_computational_gas_limit: config.required.validation_computational_gas_limit,
+ default_aa: config.required.default_aa_hash,
+ bootloader: config.required.bootloader_hash,
+ }
+ }
+}
+
+/// Converts the URL into a String with the port provided,
+/// even if it is the default one.
+///
+/// The `url` library does not expose this functionality directly, yet the library we use
+/// for RPC requires the port to always be set explicitly.
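+///
+/// For example (derived from the implementation below): "https://example.com/path"
+/// becomes "https://example.com:443/path", since 443 is the known default port for
+/// https, while "http://127.0.0.1:3050" keeps its explicit port and becomes
+/// "http://127.0.0.1:3050/".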
+fn format_url_with_port(url: &Url) -> anyhow::Result { + let scheme = url.scheme(); + let host = url.host_str().context("No host in the URL")?; + let port_str = match url.port_or_known_default() { + Some(port) => format!(":{port}"), + None => String::new(), + }; + let path = url.path(); + let query_str = url.query().map(|q| format!("?{}", q)).unwrap_or_default(); + + Ok(format!( + "{scheme}://{host}{port}{path}{query_str}", + scheme = scheme, + host = host, + port = port_str, + path = path, + query_str = query_str + )) +} diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index bb6ed53b99a9..e5f9d9ef8a48 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -1,39 +1,48 @@ use prometheus_exporter::run_prometheus_exporter; use tokio::{sync::watch, task, time::sleep}; +use zksync_state::FactoryDepsCache; +use config::ExternalNodeConfig; use std::{sync::Arc, time::Duration}; -use zksync_basic_types::L2ChainId; -use zksync_config::ZkSyncConfig; +use zksync_basic_types::Address; +use zksync_config::DBConfig; + +use zksync_core::api_server::healthcheck::HealthCheckHandle; use zksync_core::{ - api_server::{healthcheck, tx_sender::TxSenderBuilder, web3::ApiBuilder}, + api_server::{ + execution_sandbox::VmConcurrencyLimiter, healthcheck, tx_sender::TxSenderBuilder, + web3::ApiBuilder, + }, block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}, consistency_checker::ConsistencyChecker, - data_fetchers::token_list::TokenListFetcher, l1_gas_price::MainNodeGasPriceFetcher, - metadata_calculator::{MetadataCalculator, TreeImplementation}, + metadata_calculator::{ + MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, + }, reorg_detector::ReorgDetector, setup_sigint_handler, - state_keeper::{ - batch_executor::MainBatchExecutorBuilder, seal_criteria::SealManager, ZkSyncStateKeeper, - }, + state_keeper::{MainBatchExecutorBuilder, SealManager, ZkSyncStateKeeper}, sync_layer::{ - batch_status_updater::run_batch_status_updater, external_io::ExternalIO, + batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, ExternalNodeSealer, SyncState, }, - wait_for_tasks, }; -use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; +use zksync_dal::{connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; use zksync_health_check::CheckHealth; use zksync_storage::RocksDB; +use zksync_utils::wait_for_tasks::wait_for_tasks; + +mod config; /// Creates the state keeper configured to work in the external node mode. 
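///
/// Roughly, it wires an `ExternalNodeSealer` over the shared `ActionQueue`, a
/// `MainBatchExecutorBuilder` for VM execution, and an `ExternalIO` that feeds the
/// keeper blocks fetched from the main node. A hedged call sketch matching the new
/// signature (variable names are illustrative):
///
/// ```ignore
/// let state_keeper = build_state_keeper(
///     action_queue, state_cache_path, main_node_url, pool,
///     sync_state, l2_erc20_bridge_addr, stop_receiver,
/// ).await;
/// ```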
-fn build_state_keeper( +async fn build_state_keeper( action_queue: ActionQueue, state_keeper_db_path: String, main_node_url: String, connection_pool: ConnectionPool, sync_state: SyncState, + l2_erc20_bridge_addr: Address, stop_receiver: watch::Receiver, ) -> ZkSyncStateKeeper { let en_sealer = ExternalNodeSealer::new(action_queue.clone()); @@ -61,73 +70,92 @@ fn build_state_keeper( validation_computational_gas_limit, )); - let io = Box::new(ExternalIO::new( - connection_pool, - action_queue, - sync_state, - main_node_url, - )); + let io = Box::new( + ExternalIO::new( + connection_pool, + action_queue, + sync_state, + main_node_url, + l2_erc20_bridge_addr, + ) + .await, + ); ZkSyncStateKeeper::new(stop_receiver, io, batch_executor_base, sealer) } async fn init_tasks( - config: ZkSyncConfig, + config: ExternalNodeConfig, connection_pool: ConnectionPool, -) -> (Vec>, watch::Sender) { - let main_node_url = config.api.web3_json_rpc.main_node_url.as_ref().unwrap(); +) -> ( + Vec>, + watch::Sender, + HealthCheckHandle, +) { + let main_node_url = config + .required + .main_node_url() + .expect("Main node URL is incorrect"); let (stop_sender, stop_receiver) = watch::channel::(false); let mut healthchecks: Vec> = Vec::new(); // Create components. - let gas_adjuster = Arc::new(MainNodeGasPriceFetcher::new(main_node_url)); + let gas_adjuster = Arc::new(MainNodeGasPriceFetcher::new(&main_node_url)); let sync_state = SyncState::new(); let action_queue = ActionQueue::new(); let state_keeper = build_state_keeper( action_queue.clone(), - config.db.state_keeper_db_path.clone(), - main_node_url.clone(), + config.required.state_cache_path.clone(), + main_node_url.to_string(), connection_pool.clone(), sync_state.clone(), + config.remote.l2_erc20_bridge_addr, stop_receiver.clone(), - ); + ) + .await; let fetcher = MainNodeFetcher::new( - ConnectionPool::new(Some(1), true), - main_node_url, + ConnectionPool::new(Some(1), DbVariant::Master).await, + &main_node_url, action_queue.clone(), sync_state.clone(), stop_receiver.clone(), - ); - let metadata_calculator = MetadataCalculator::lightweight(&config, TreeImplementation::New); + ) + .await; + + let metadata_calculator = MetadataCalculator::new(&MetadataCalculatorConfig { + db_path: &config.required.merkle_tree_path, + mode: MetadataCalculatorModeConfig::Lightweight, + delay_interval: config.optional.metadata_calculator_delay(), + max_block_batch: config.optional.max_blocks_per_tree_batch(), + throttle_interval: config.optional.merkle_tree_throttle(), + }) + .await; healthchecks.push(Box::new(metadata_calculator.tree_health_check())); + let consistency_checker = ConsistencyChecker::new( - &config.eth_client.web3_url, - config.contracts.validator_timelock_addr, + &config + .required + .eth_client_url() + .expect("L1 client URL is incorrect"), 10, - ConnectionPool::new(Some(1), true), + ConnectionPool::new(Some(1), DbVariant::Master).await, ); - // We need this component to fetch "well-known" tokens. - // And we need to know "well-known" tokens since there are paymaster-related - // checks which depend on this particular token quality. - let token_list_fetcher = TokenListFetcher::new(config.clone()); + + let batch_status_updater = BatchStatusUpdater::new( + &main_node_url, + ConnectionPool::new(Some(1), DbVariant::Master).await, + ) + .await; // Run the components. 
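// In the rewritten block below, every long-running component is spawned as an async task
// via `task::spawn`; the old `spawn_blocking` wrappers are gone because the tree, the
// batch status updater and the state keeper now expose async `run` methods.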
- let prometheus_task = run_prometheus_exporter(config.api.prometheus.clone(), false); let tree_stop_receiver = stop_receiver.clone(); - let tree_handle = task::spawn_blocking(move || { - let pool = ConnectionPool::new(Some(1), true); - metadata_calculator.run(&pool, tree_stop_receiver); - }); + let tree_pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let prover_tree_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let tree_handle = + task::spawn(metadata_calculator.run(tree_pool, prover_tree_pool, tree_stop_receiver)); let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone())); - let updater_stop_receiver = stop_receiver.clone(); - let updater_handle = task::spawn_blocking(move || { - run_batch_status_updater( - ConnectionPool::new(Some(1), true), - action_queue, - updater_stop_receiver, - ) - }); - let sk_handle = task::spawn_blocking(|| state_keeper.run()); + let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone())); + let sk_handle = task::spawn(state_keeper.run()); let fetcher_handle = tokio::spawn(fetcher.run()); let gas_adjuster_handle = tokio::spawn(gas_adjuster.clone().run(stop_receiver.clone())); @@ -135,111 +163,147 @@ async fn init_tasks( let mut tx_sender_builder = TxSenderBuilder::new(config.clone().into(), connection_pool.clone()) .with_main_connection_pool(connection_pool.clone()) - .with_tx_proxy(main_node_url.clone()) - .with_state_keeper_config(config.chain.state_keeper.clone()); + .with_tx_proxy(&main_node_url); // Add rate limiter if enabled. - if let Some(transactions_per_sec_limit) = - config.api.web3_json_rpc.transactions_per_sec_limit - { - tx_sender_builder = tx_sender_builder.with_rate_limiter(transactions_per_sec_limit); + if let Some(tps_limit) = config.optional.transactions_per_sec_limit { + tx_sender_builder = tx_sender_builder.with_rate_limiter(tps_limit); }; - tx_sender_builder.build(gas_adjuster, config.chain.state_keeper.default_aa_hash) + let vm_concurrency_limiter = + VmConcurrencyLimiter::new(config.optional.vm_concurrency_limit()); + + let factory_deps_cache = FactoryDepsCache::new( + "factory_deps_cache", + config.optional.factory_deps_cache_size_mb(), + ); + + tx_sender_builder + .build( + gas_adjuster, + config.required.default_aa_hash, + Arc::new(vm_concurrency_limiter), + factory_deps_cache.clone(), + ) + .await }; - let http_api_handle = + let (http_api_handle, http_api_healthcheck) = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) - .http(config.api.web3_json_rpc.http_port) - .with_filter_limit(config.api.web3_json_rpc.filters_limit()) - .with_threads(config.api.web3_json_rpc.threads_per_server as usize) + .http(config.required.http_port) + .with_filter_limit(config.optional.filters_limit()) + .with_threads(config.required.threads_per_server) .with_tx_sender(tx_sender.clone()) .with_sync_state(sync_state.clone()) - .build(stop_receiver.clone()); + .build(stop_receiver.clone()) + .await; - let token_list_fetcher_handle = - tokio::spawn(token_list_fetcher.run(connection_pool.clone(), stop_receiver.clone())); - - let mut task_handles = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool) - .ws(config.api.web3_json_rpc.ws_port) - .with_filter_limit(config.api.web3_json_rpc.filters_limit()) - .with_subscriptions_limit(config.api.web3_json_rpc.subscriptions_limit()) - .with_polling_interval(config.api.web3_json_rpc.pubsub_interval()) - .with_tx_sender(tx_sender) - .with_sync_state(sync_state) - 
.build(stop_receiver.clone()); + let (mut task_handles, ws_api_healthcheck) = + ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool) + .ws(config.required.ws_port) + .with_filter_limit(config.optional.filters_limit()) + .with_subscriptions_limit(config.optional.subscriptions_limit()) + .with_polling_interval(config.optional.polling_interval()) + .with_threads(config.required.threads_per_server) + .with_tx_sender(tx_sender) + .with_sync_state(sync_state) + .build(stop_receiver.clone()) + .await; + healthchecks.push(Box::new(ws_api_healthcheck)); + healthchecks.push(Box::new(http_api_healthcheck)); healthchecks.push(Box::new(ConnectionPoolHealthCheck::new( - ConnectionPool::new(Some(1), true), + ConnectionPool::new(Some(1), DbVariant::Master).await, ))); let healthcheck_handle = healthcheck::start_server_thread_detached( - config.api.healthcheck.bind_addr(), + ([0, 0, 0, 0], config.required.healthcheck_port).into(), healthchecks, - stop_receiver, ); + if let Some(port) = config.optional.prometheus_port { + let prometheus_task = run_prometheus_exporter(port, None); + task_handles.push(prometheus_task); + } task_handles.extend(http_api_handle); task_handles.extend([ - prometheus_task, sk_handle, fetcher_handle, updater_handle, tree_handle, gas_adjuster_handle, consistency_checker_handle, - healthcheck_handle, - token_list_fetcher_handle, ]); - (task_handles, stop_sender) + (task_handles, stop_sender, healthcheck_handle) } -async fn shutdown_components(stop_sender: watch::Sender<bool>) { +async fn shutdown_components( + stop_sender: watch::Sender<bool>, + healthcheck_handle: HealthCheckHandle, +) { let _ = stop_sender.send(true); RocksDB::await_rocksdb_termination(); // Sleep for some time to let components gracefully stop. sleep(Duration::from_secs(10)).await; + healthcheck_handle.stop().await; } #[tokio::main] async fn main() -> anyhow::Result<()> { // Initial setup. - let _sentry_guard = vlog::init(); - let connection_pool = ConnectionPool::new(None, true); - let config = ZkSyncConfig::from_env(); - let main_node_url = config.api.web3_json_rpc.main_node_url.as_ref().unwrap(); + vlog::init(); + let _sentry_guard = vlog::init_sentry(); + let config = ExternalNodeConfig::collect() + .await + .expect("Failed to load external node config"); + let main_node_url = config + .required + .main_node_url() + .expect("Main node URL is incorrect"); + + let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; let sigint_receiver = setup_sigint_handler(); - vlog::warn!("The external node is in the experimental stage, use it at your own risk"); + vlog::warn!("The external node is in the alpha phase, and should be used with caution."); vlog::info!("Started the external node"); vlog::info!("Main node URL is: {}", main_node_url);
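The healthcheck bind address in `init_tasks` above is built with the standard library's tuple-to-`SocketAddr` conversion; a tiny standalone sketch (the port value is hypothetical):

```rust
// ([u8; 4], u16) converts directly into std::net::SocketAddr,
// which is what `([0, 0, 0, 0], healthcheck_port).into()` relies on.
use std::net::SocketAddr;

fn main() {
    let healthcheck_port: u16 = 3081; // made-up port for illustration
    let addr: SocketAddr = ([0, 0, 0, 0], healthcheck_port).into();
    assert_eq!(addr.to_string(), "0.0.0.0:3081");
}
```

// Make sure that genesis is performed.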
perform_genesis_if_needed( - &mut connection_pool.access_storage_blocking(), - L2ChainId(config.chain.eth.zksync_network_id), - config.chain.state_keeper.base_system_contracts_hashes(), + &mut connection_pool.access_storage().await, + config.remote.l2_chain_id, + config.base_system_contracts_hashes(), main_node_url.clone(), ) .await; - let (task_handles, stop_sender) = init_tasks(config.clone(), connection_pool.clone()).await; + let (task_handles, stop_sender, health_check_handle) = + init_tasks(config.clone(), connection_pool.clone()).await; - let reorg_detector = ReorgDetector::new(main_node_url, connection_pool.clone()); + let reorg_detector = ReorgDetector::new(&main_node_url, connection_pool.clone()); let reorg_detector_handle = tokio::spawn(reorg_detector.run()); + let reverter_config = DBConfig { + state_keeper_db_path: config.required.state_cache_path.clone(), + new_merkle_tree_ssd_path: config.required.merkle_tree_path.clone(), + ..Default::default() + }; + + let particular_crypto_alerts = None; + let graceful_shutdown = None::<futures::future::Ready<()>>; + let tasks_allowed_to_finish = false; tokio::select! { - _ = wait_for_tasks(task_handles, false) => {}, + _ = wait_for_tasks(task_handles, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = sigint_receiver => { vlog::info!("Stop signal received, shutting down"); }, last_correct_batch = reorg_detector_handle => { if let Ok(last_correct_batch) = last_correct_batch { vlog::info!("Performing rollback to block {}", last_correct_batch); - shutdown_components(stop_sender).await; - BlockReverter::new(config, connection_pool, L1ExecutedBatchesRevert::Allowed) + shutdown_components(stop_sender, health_check_handle).await; + BlockReverter::new(reverter_config, None, connection_pool, L1ExecutedBatchesRevert::Allowed) .rollback_db(last_correct_batch, BlockReverterFlags::all()) .await; vlog::info!("Rollback successfully completed, the node has to restart to continue working"); @@ -252,6 +316,6 @@ async fn main() -> anyhow::Result<()> {
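The `tokio::select!` above races three shutdown triggers (a task exiting, SIGINT, the reorg detector) and funnels every branch into the same cleanup path. A reduced sketch of the pattern — illustrative names only, with `tokio::signal::ctrl_c` standing in for the patch's `setup_sigint_handler`:

```rust
// Race the task set against a stop signal; whichever completes first wins.
use std::time::Duration;
use tokio::{signal, time};

// Stand-in for the joined set of spawned components.
async fn run_all_tasks() {
    time::sleep(Duration::from_secs(3600)).await;
}

#[tokio::main]
async fn main() {
    tokio::select! {
        _ = run_all_tasks() => println!("a task exited unexpectedly"),
        _ = signal::ctrl_c() => println!("stop signal received, shutting down"),
    }
    // Both branches fall through to one shared shutdown path, mirroring the
    // `shutdown_components` call that follows the `select!` in the patch.
}
```

// Reaching this point means that either some actor exited unexpectedly or we received a stop signal. // Broadcast the stop signal to all actors and exit.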
- shutdown_components(stop_sender).await; + shutdown_components(stop_sender, health_check_handle).await; Ok(()) } diff --git a/core/bin/l1_tx_effective_gas_price_migration/Cargo.toml b/core/bin/l1_tx_effective_gas_price_migration/Cargo.toml new file mode 100644 index 000000000000..0c6dff2371a1 --- /dev/null +++ b/core/bin/l1_tx_effective_gas_price_migration/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "l1_tx_effective_gas_price_migration" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { version = "1" } +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_dal = { path = "../../lib/dal", version = "1.0" } +structopt = "0.3.26" diff --git a/core/bin/l1_tx_effective_gas_price_migration/src/main.rs b/core/bin/l1_tx_effective_gas_price_migration/src/main.rs new file mode 100644 index 000000000000..ad5815d70648 --- /dev/null +++ b/core/bin/l1_tx_effective_gas_price_migration/src/main.rs @@ -0,0 +1,63 @@ +use structopt::StructOpt; +use zksync_dal::connection::DbVariant; +use zksync_dal::ConnectionPool; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "DB migration for setting correct effective_gas_price", + about = "DB migration for setting correct effective_gas_price" +)] +struct Opt { + #[structopt(short = "f", long = "first_post_m6_block")] + first_post_m6_block: u32, +} + +#[tokio::main] +async fn main() { + let opt = Opt::from_args(); + let first_post_m6_block = opt.first_post_m6_block; + println!("first_post_m6_block: {first_post_m6_block}"); + + let pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let mut storage = pool.access_storage().await; + + const BLOCK_RANGE: u32 = 1000; + println!("Setting effective gas price for pre-M6 transactions"); + + let mut from_block_number = 0; + loop { + if from_block_number >= first_post_m6_block { + break; + } + + let to_block_number = + std::cmp::min(first_post_m6_block - 1, from_block_number + BLOCK_RANGE - 1); + println!("Block range {from_block_number}-{to_block_number}"); + storage + .transactions_dal() + .migrate_l1_txs_effective_gas_price_pre_m6(from_block_number, to_block_number) + .await; + + from_block_number = to_block_number + 1; + } + + println!("Setting effective gas price for post-M6 transactions"); + + let current_block_number = storage.blocks_dal().get_sealed_miniblock_number().await; + let mut from_block_number = first_post_m6_block; + loop { + if from_block_number > current_block_number.0 { + break; + } + + let to_block_number = + std::cmp::min(current_block_number.0, from_block_number + BLOCK_RANGE - 1); + println!("Block range {from_block_number}-{to_block_number}"); + storage + .transactions_dal() + .migrate_l1_txs_effective_gas_price_post_m6(from_block_number, to_block_number) + .await; + + from_block_number = to_block_number + 1; + } +}
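The two loops in the migration above walk the chain in fixed windows of `BLOCK_RANGE` blocks with inclusive bounds. A standalone sketch that exercises only the range arithmetic (the boundary value is made up):

```rust
// Checks the windowing used by the migration: contiguous, inclusive,
// non-overlapping ranges of at most BLOCK_RANGE blocks.
fn main() {
    const BLOCK_RANGE: u32 = 1000;
    let first_post_m6_block = 2_500; // hypothetical boundary
    let mut from = 0u32;
    while from < first_post_m6_block {
        let to = std::cmp::min(first_post_m6_block - 1, from + BLOCK_RANGE - 1);
        println!("Block range {from}-{to}"); // 0-999, 1000-1999, 2000-2499
        from = to + 1;
    }
}
```

diff --git a/core/bin/prover/Cargo.lock b/core/bin/prover/Cargo.lock index 81e95a2b831a..4f12f9e908be 100644 --- a/core/bin/prover/Cargo.lock +++ b/core/bin/prover/Cargo.lock @@ -77,20 +77,26 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 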
"cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -111,14 +117,14 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -144,15 +150,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -171,9 +177,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" [[package]] name = "assert_matches" @@ -181,137 +187,11 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "async-channel" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" -dependencies = [ - "async-lock", - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" -dependencies = [ - "async-lock", - "autocfg 1.1.0", - "concurrent-queue", - "futures-lite", - "libc", - 
"log", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "windows-sys 0.42.0", -] - -[[package]] -name = "async-lock" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-native-tls" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9e7a929bd34c68a82d58a4de7f86fffdaf97fb2af850162a7bb19dd7269b33" -dependencies = [ - "async-std", - "native-tls", - "thiserror", - "url", -] - -[[package]] -name = "async-process" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" -dependencies = [ - "async-io", - "async-lock", - "autocfg 1.1.0", - "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "libc", - "signal-hook", - "windows-sys 0.42.0", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils 0.8.15", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite 0.2.9", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -320,30 +200,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] -[[package]] -name = "async-task" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" - [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -355,12 +229,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-waker" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" - [[package]] name = "atty" version = "0.2.14" @@ -389,14 +257,14 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backon" -version = "0.4.0" +version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294" +checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9" dependencies = [ - "futures 0.3.27", + "fastrand", + "futures-core", "pin-project", - "rand 0.8.5", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] @@ -428,9 +296,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -438,28 +306,19 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "beef" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" -dependencies = [ - "serde", -] - [[package]] name = "bellman_ce" version = "0.3.2" source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bit-vec", "blake2s_const", "blake2s_simd", "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.27", + "futures 0.3.28", "hex", "lazy_static", "num_cpus", @@ -506,8 +365,8 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "regex", "rustc-hash", "shlex", @@ -531,9 +390,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" [[package]] name = "bitvec" @@ -564,7 +423,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -634,35 +493,11 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" -[[package]] -name = "blocking" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" -dependencies = [ - "async-channel", - "async-lock", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", -] - -[[package]] -name = "bstr" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -688,25 +523,11 @@ version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" -dependencies = [ - "jobserver", -] [[package]] name = "cexpr" @@ -731,13 +552,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "rustc-serialize", "serde", @@ -765,9 +586,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -823,16 +644,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "combine" version = "4.6.6" @@ -843,15 +654,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "concurrent-queue" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" -dependencies = [ - "crossbeam-utils 0.8.15", -] - [[package]] name = "const-oid" version = "0.7.1" @@ -897,15 +699,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -946,11 +748,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.14", + "crossbeam-epoch 0.9.15", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -965,12 +767,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -991,8 +793,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1012,14 +814,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", - "memoffset 0.8.0", + "crossbeam-utils 0.8.16", + "memoffset 0.9.0", "scopeguard", ] @@ -1041,7 +843,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -1057,9 +859,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -1135,25 +937,15 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote 1.0.26", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.6.0" @@ -1165,56 +957,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix", - "windows-sys 0.45.0", -] - -[[package]] -name = "cxx" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.92" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" -dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "windows-sys 0.48.0", ] [[package]] @@ -1235,8 +983,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -1248,7 +996,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -1289,8 +1037,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -1301,8 +1049,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustc_version", "syn 1.0.109", ] @@ -1318,9 +1066,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1383,7 +1131,7 @@ dependencies = [ "base16ct", "crypto-bigint 0.4.9", "der 0.6.1", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array", "group", @@ -1440,13 +1188,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1501,12 +1249,6 @@ dependencies = [ "uint", ] -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - [[package]] name = "fastrand" version = "1.9.0" @@ -1548,8 +1290,8 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] @@ -1607,9 +1349,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1694,9 +1436,9 @@ checksum = 
"3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1709,9 +1451,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1719,15 +1461,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1748,24 +1490,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" - -[[package]] -name = "futures-lite" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite 0.2.9", - "waker-fn", -] +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-locks" @@ -1773,48 +1500,44 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c4e684ddb2d8a4db5ca8a02b35156da129674ba4412b6f528698d58c594954" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "tokio 0.2.25", ] [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" version = "3.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" -dependencies = [ - "gloo-timers", - "send_wrapper", -] [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1831,9 +1554,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1852,9 +1575,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1863,9 +1586,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -1873,71 +1596,14 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "globset" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" -dependencies = [ - "aho-corasick", - "bstr", - "fnv", - "log", - "regex", -] - -[[package]] -name = "gloo-net" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10" -dependencies = [ - "futures-channel", - "futures-core", - "futures-sink", - "gloo-utils", - "js-sys", - "pin-project", - "serde", - "serde_json", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "gloo-utils" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e8fc851e9c7b9852508bc6e3f690f452f474417e8545ec9857b7f7377036b5" -dependencies = [ - "js-sys", - "serde", - "serde_json", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "google-cloud-auth" -version = "0.9.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb" dependencies = [ "async-trait", - "base64 0.21.0", + "base64 0.21.2", "google-cloud-metadata", 
"google-cloud-token", "home", @@ -1946,42 +1612,35 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.20", - "tokio 1.28.0", + "time 0.3.22", + "tokio 1.28.2", "tracing", "urlencoding", ] -[[package]] -name = "google-cloud-default" -version = "0.1.1" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" -dependencies = [ - "async-trait", - "google-cloud-auth", - "google-cloud-metadata", - "google-cloud-storage", -] - [[package]] name = "google-cloud-metadata" version = "0.3.2" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" dependencies = [ "reqwest", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] name = "google-cloud-storage" -version = "0.10.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932" dependencies = [ "async-stream", - "base64 0.21.0", + "base64 0.21.2", "bytes 1.4.0", "futures-util", + "google-cloud-auth", + "google-cloud-metadata", "google-cloud-token", "hex", "once_cell", @@ -1994,16 +1653,17 @@ dependencies = [ "serde_json", "sha2 0.10.6", "thiserror", - "time 0.3.20", - "tokio 1.28.0", + "time 0.3.22", + "tokio 1.28.2", "tracing", "url", ] [[package]] name = "google-cloud-token" -version = "0.1.0" -source = "git+https://github.com/yoshidan/google-cloud-rust?branch=main#cb14a257e2170c020b906f53b1ce761f20d58492" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a" dependencies = [ "async-trait", ] @@ -2011,12 +1671,12 @@ dependencies = [ [[package]] name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bindgen", "crossbeam 0.8.2", "derivative", - "futures 0.3.27", + "futures 0.3.28", "futures-locks", "num_cpus", ] @@ -2024,7 +1684,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -2050,9 +1710,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes 1.4.0", "fnv", @@ -2062,16 +1722,16 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.28.0", - "tokio-util 0.7.7", + "tokio 1.28.2", + "tokio-util 0.7.8", "tracing", ] [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -2204,16 +1864,16 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "home" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -2269,9 +1929,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes 1.4.0", "futures-channel", @@ -2285,7 +1945,7 @@ dependencies = [ "itoa 1.0.6", "pin-project-lite 0.2.9", "socket2", - "tokio 1.28.0", + "tokio 1.28.2", "tower-service", "tracing", "want", @@ -2293,18 +1953,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", - "log", "rustls", - "rustls-native-certs", - "tokio 1.28.0", + "tokio 1.28.2", "tokio-rustls", - "webpki-roots", ] [[package]] @@ -2315,7 +1972,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.28.0", + "tokio 1.28.2", "tokio-io-timeout", ] @@ -2328,32 +1985,31 @@ dependencies = [ "bytes 1.4.0", "hyper", "native-tls", - "tokio 1.28.0", + "tokio 1.28.2", "tokio-native-tls", ] [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2375,9 +2031,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2416,16 +2072,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -2442,19 +2098,20 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "ipnetwork" @@ -2464,14 +2121,14 @@ checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2495,20 +2152,11 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" -[[package]] -name = "jobserver" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -2519,7 +2167,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.27", + "futures 0.3.28", "futures-executor", "futures-util", "log", @@ -2528,175 +2176,13 @@ dependencies = [ "serde_json", ] -[[package]] -name = "jsonrpsee" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" -dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-http-client", - "jsonrpsee-proc-macros", - "jsonrpsee-server", - "jsonrpsee-types", - "jsonrpsee-wasm-client", - "jsonrpsee-ws-client", - "tracing", -] - -[[package]] -name = "jsonrpsee-client-transport" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" -dependencies = 
[ - "anyhow", - "futures-channel", - "futures-timer", - "futures-util", - "gloo-net", - "http", - "jsonrpsee-core", - "jsonrpsee-types", - "pin-project", - "rustls-native-certs", - "soketto", - "thiserror", - "tokio 1.28.0", - "tokio-rustls", - "tokio-util 0.7.7", - "tracing", - "webpki-roots", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" -dependencies = [ - "anyhow", - "arrayvec 0.7.2", - "async-lock", - "async-trait", - "beef", - "futures-channel", - "futures-timer", - "futures-util", - "globset", - "hyper", - "jsonrpsee-types", - "parking_lot 0.12.1", - "rand 0.8.5", - "rustc-hash", - "serde", - "serde_json", - "soketto", - "thiserror", - "tokio 1.28.0", - "tracing", - "wasm-bindgen-futures", -] - -[[package]] -name = "jsonrpsee-http-client" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" -dependencies = [ - "async-trait", - "hyper", - "hyper-rustls", - "jsonrpsee-core", - "jsonrpsee-types", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio 1.28.0", - "tracing", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" -dependencies = [ - "heck 0.4.1", - "proc-macro-crate", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", -] - -[[package]] -name = "jsonrpsee-server" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" -dependencies = [ - "futures-channel", - "futures-util", - "http", - "hyper", - "jsonrpsee-core", - "jsonrpsee-types", - "serde", - "serde_json", - "soketto", - "tokio 1.28.0", - "tokio-stream", - "tokio-util 0.7.7", - "tower", - "tracing", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "jsonrpsee-wasm-client" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77310456f43c6c89bcba1f6b2fc2a28300da7c341f320f5128f8c83cc63232d" -dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", -] - -[[package]] -name = "jsonrpsee-ws-client" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" -dependencies = [ - "http", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", -] - [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "pem", "ring", "serde", @@ -2718,22 +2204,13 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -2751,9 +2228,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.140" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libloading" @@ -2767,67 +2244,33 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" - -[[package]] -name = "librocksdb-sys" -version = "0.6.1+6.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", -] - -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "link-cplusplus" -version = "1.0.8" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "local-ip-address" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -2835,13 +2278,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", - "value-bag", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "mach" @@ -2907,9 +2346,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -2922,7 +2361,7 @@ checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ "ahash", "metrics-macros", - "portable-atomic", + "portable-atomic 0.3.20", ] [[package]] @@ -2937,10 +2376,10 @@ dependencies = [ "metrics", "metrics-util", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "tracing", ] @@ -2950,8 +2389,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2961,22 +2400,22 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a" dependencies = [ - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", "hashbrown 0.12.3", "metrics", "num_cpus", "parking_lot 0.12.1", - "portable-atomic", + "portable-atomic 0.3.20", "quanta", "sketches-ddsketch", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -3005,14 +2444,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -3041,12 +2479,27 @@ dependencies = [ [[package]] name = "neli" -version = "0.5.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" dependencies = [ "byteorder", "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +dependencies = [ + "either", + "proc-macro2 1.0.60", + "quote 1.0.28", + "serde", + "syn 1.0.109", ] [[package]] @@ -3192,8 +2645,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3265,18 +2718,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = 
"03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -3286,9 +2739,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -3301,13 +2754,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3318,11 +2771,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "pkg-config", @@ -3336,7 +2788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ "async-trait", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "futures-channel", "futures-executor", "futures-util", @@ -3368,7 +2820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1a6ca9de4c8b00aa7f1a153bd76cb263287155cec642680d79d98706f3d28a" dependencies = [ "async-trait", - "futures 0.3.27", + "futures 0.3.28", "futures-util", "http", "opentelemetry", @@ -3377,7 +2829,7 @@ dependencies = [ "prost-build", "reqwest", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "tonic", "tonic-build", ] @@ -3393,9 +2845,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" +checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" dependencies = [ "log", "serde", @@ -3451,7 +2903,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -3466,16 +2918,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", -] - -[[package]] -name = "parking" -version = 
"2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +] [[package]] name = "parking_lot" @@ -3495,7 +2941,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -3507,22 +2953,22 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.45.0", + "windows-targets", ] [[package]] @@ -3589,15 +3035,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -3605,9 +3051,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -3615,22 +3061,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -3649,22 +3095,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = 
"39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3719,31 +3165,24 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] -name = "polling" -version = "2.6.0" +name = "portable-atomic" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" dependencies = [ - "autocfg 1.1.0", - "bitflags 1.3.2", - "cfg-if 1.0.0", - "concurrent-queue", - "libc", - "log", - "pin-project-lite 0.2.9", - "windows-sys 0.45.0", + "portable-atomic 1.3.3", ] [[package]] name = "portable-atomic" -version = "0.3.19" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -3771,7 +3210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit 0.19.6", + "toml_edit 0.19.10", ] [[package]] @@ -3781,8 +3220,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -3793,8 +3232,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "version_check", ] @@ -3815,9 +3254,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -3828,7 +3267,7 @@ version = "1.0.0" dependencies = [ "metrics", "metrics-exporter-prometheus", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zksync_config", ] @@ -3871,8 +3310,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3889,11 +3328,11 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "log", "num_cpus", "rand 0.4.6", @@ -3908,7 +3347,7 @@ version = 
"0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "libc", "mach", "once_cell", @@ -3935,11 +3374,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", ] [[package]] @@ -4062,7 +3501,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -4161,9 +3600,9 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "num_cpus", ] @@ -4185,26 +3624,35 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.2" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce168fea28d3e05f158bda4576cf0c844d5045bc2cc3620fa0292ed5bb5814c" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.2", ] [[package]] @@ -4213,7 +3661,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] @@ -4222,13 +3670,19 @@ version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +[[package]] +name = "regex-syntax" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" + [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes 1.4.0", "encoding_rs", "futures-core", @@ -4253,10 +3707,10 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.28.0", + "tokio 1.28.2", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tower-service", "url", 
"wasm-bindgen", @@ -4273,7 +3727,7 @@ version = "0.4.1" source = "git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" dependencies = [ "addchain", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "blake2 0.10.6", "byteorder", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -4334,16 +3788,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rocksdb" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rsa" version = "0.6.1" @@ -4351,7 +3795,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" dependencies = [ "byteorder", - "digest 0.10.6", + "digest 0.10.7", "num-bigint-dig", "num-integer", "num-iter", @@ -4366,9 +3810,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -4399,49 +3843,47 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", "ring", + "rustls-webpki", "sct", - "webpki", ] [[package]] -name = "rustls-native-certs" -version = "0.6.2" +name = "rustls-pemfile" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", + "base64 0.21.2", ] [[package]] -name = "rustls-pemfile" -version = "1.0.2" +name = "rustls-webpki" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" dependencies = [ - "base64 0.21.0", + "ring", + "untrusted", ] [[package]] @@ -4480,12 +3922,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.5.0" @@ -4556,9 +3992,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = 
"2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -4569,9 +4005,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -4583,17 +4019,11 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" -[[package]] -name = "send_wrapper" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" - [[package]] name = "sentry" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" +checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" dependencies = [ "httpdate", "native-tls", @@ -4603,15 +4033,16 @@ dependencies = [ "sentry-core", "sentry-debug-images", "sentry-panic", - "tokio 1.28.0", + "sentry-tracing", + "tokio 1.28.2", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" +checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" dependencies = [ "backtrace", "once_cell", @@ -4621,9 +4052,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" +checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" dependencies = [ "hostname", "libc", @@ -4635,9 +4066,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" +checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" dependencies = [ "once_cell", "rand 0.8.5", @@ -4648,9 +4079,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" dependencies = [ "findshlibs", "once_cell", @@ -4659,56 +4090,68 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.30.0" +version = "0.31.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" +dependencies = [ + "sentry-backtrace", + "sentry-core", +] + +[[package]] +name = "sentry-tracing" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" +checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" dependencies = [ "sentry-backtrace", "sentry-core", + "tracing-core", + "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" +checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" dependencies = [ "debugid", - "getrandom 0.2.8", + "getrandom 0.2.10", "hex", "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa 1.0.6", @@ -4745,8 +4188,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -4786,7 +4229,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4810,7 +4253,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4831,7 +4274,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -4850,16 +4293,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" -[[package]] -name = "signal-hook" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -4875,7 +4308,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -4888,14 +4321,14 @@ dependencies = [ 
"num-bigint 0.4.3", "num-traits", "thiserror", - "time 0.3.20", + "time 0.3.22", ] [[package]] name = "sketches-ddsketch" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" @@ -4922,22 +4355,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "soketto" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" -dependencies = [ - "base64 0.13.1", - "bytes 1.4.0", - "futures 0.3.27", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha-1", -] - [[package]] name = "spin" version = "0.5.2" @@ -5006,9 +4423,9 @@ dependencies = [ "bytes 1.4.0", "chrono", "crc", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "dirs", "either", "futures-channel", @@ -5039,6 +4456,7 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", + "tokio-stream", "url", "whoami", ] @@ -5054,8 +4472,8 @@ dependencies = [ "heck 0.3.3", "hex", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "serde_json", "sha2 0.9.9", @@ -5071,9 +4489,10 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" dependencies = [ - "async-native-tls", - "async-std", "native-tls", + "once_cell", + "tokio 1.28.2", + "tokio-native-tls", ] [[package]] @@ -5123,8 +4542,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -5144,8 +4563,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -5173,28 +4592,28 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "sync_vm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +version = "1.3.3" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "cs_derive", "derivative", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -5223,15 +4642,16 @@ checksum = 
"55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg 1.1.0", "cfg-if 1.0.0", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -5245,12 +4665,12 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" +checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -5265,22 +4685,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -5305,9 +4725,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa 1.0.6", "serde", @@ -5317,15 +4737,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -5376,9 +4796,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", @@ -5400,7 +4820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] @@ -5409,9 +4829,9 @@ version = "2.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 2.0.12", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -5421,29 +4841,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", - "tokio 1.28.0", - "webpki", + "tokio 1.28.2", ] [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] @@ -5457,29 +4876,28 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.9", - "tokio 1.28.0", + "tokio 1.28.2", ] [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes 1.4.0", "futures-core", - "futures-io", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.28.0", + "tokio 1.28.2", "tracing", ] [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" @@ -5494,9 +4912,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.6" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "toml_datetime", @@ -5524,7 +4942,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "tokio 1.28.0", + "tokio 1.28.2", "tokio-stream", "tokio-util 0.6.10", "tower", @@ -5540,9 +4958,9 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", "prost-build", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -5559,8 +4977,8 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.28.0", - "tokio-util 0.7.7", + "tokio 1.28.2", + "tokio-util 0.7.8", "tower-layer", "tower-service", "tracing", @@ -5593,20 +5011,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = 
"0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -5659,9 +5077,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -5672,7 +5090,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.20", + "time 0.3.22", "tracing", "tracing-core", "tracing-log", @@ -5729,15 +5147,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -5780,11 +5198,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "native-tls", "once_cell", @@ -5793,12 +5211,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", "serde", ] @@ -5811,11 +5229,11 @@ checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "serde", ] @@ -5825,16 +5243,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - 
"version_check", -] - [[package]] name = "vcpkg" version = "0.2.15" @@ -5868,41 +5276,12 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "vm" -version = "0.1.0" -dependencies = [ - "hex", - "itertools", - "metrics", - "once_cell", - "thiserror", - "tracing", - "vlog", - "zk_evm", - "zkevm-assembly", - "zksync_config", - "zksync_contracts", - "zksync_crypto", - "zksync_state", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -5926,9 +5305,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5936,24 +5315,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5963,32 +5342,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" 
[[package]] name = "wasm-streams" @@ -6005,9 +5384,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -6019,13 +5398,13 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "base64 0.13.1", "bytes 1.4.0", "derive_more", "ethabi", "ethereum-types", - "futures 0.3.27", + "futures 0.3.28", "futures-timer", "headers", "hex", @@ -6115,6 +5494,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-sys" version = "0.42.0" @@ -6130,37 +5518,13 @@ dependencies = [ "windows_x86_64_msvc 0.42.2", ] -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets", ] [[package]] @@ -6264,9 +5628,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -6288,27 +5652,40 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zk_evm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.3#9a1eaa98acb9e3280dbbde5b132cbf64e15fe96e" dependencies = [ + "anyhow", "lazy_static", "num 0.4.0", "serde", "serde_json", "static_assertions", + 
"zk_evm_abstractions", + "zkevm_opcode_defs", +] + +[[package]] +name = "zk_evm_abstractions" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zk_evm_abstractions.git#31361360123b4f2532ab345522c9b19510f04c31" +dependencies = [ + "anyhow", + "serde", + "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" version = "1.3.2" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#a5f2c38305fa672ec23cf3d4d804eb50e591288c" dependencies = [ "env_logger 0.9.3", "hex", @@ -6330,7 +5707,7 @@ name = "zkevm_opcode_defs" version = "1.3.2" source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "blake2 0.10.6", "ethereum-types", "k256", @@ -6341,8 +5718,8 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.3#363ead7afaac72bd3006c49d501934747781cbb4" dependencies = [ "bincode", "circuit_testing", @@ -6382,12 +5759,12 @@ dependencies = [ "async-trait", "backon", "convert_case 0.6.0", - "futures 0.3.27", + "futures 0.3.28", "hex", "serde", "serde_json", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "zksync_config", "zksync_contracts", "zksync_dal", @@ -6445,7 +5822,6 @@ name = "zksync_dal" version = "1.0.0" dependencies = [ "anyhow", - "async-std", "bigdecimal", "bincode", "hex", @@ -6455,17 +5831,15 @@ dependencies = [ "once_cell", "serde_json", "sqlx", + "strum", "thiserror", + "tokio 1.28.2", "vlog", - "vm", "zksync_config", "zksync_contracts", "zksync_health_check", - "zksync_state", - "zksync_storage", "zksync_types", "zksync_utils", - "zksync_web3_decl", ] [[package]] @@ -6480,7 +5854,7 @@ dependencies = [ "parity-crypto", "serde", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zksync_config", "zksync_contracts", @@ -6509,13 +5883,15 @@ dependencies = [ [[package]] name = "zksync_health_check" version = "0.1.0" +dependencies = [ + "async-trait", +] [[package]] name = "zksync_mini_merkle_tree" version = "1.0.0" dependencies = [ "once_cell", - "rayon", "zksync_basic_types", "zksync_crypto", ] @@ -6524,13 +5900,13 @@ dependencies = [ name = "zksync_object_store" version = "1.0.0" dependencies = [ + "async-trait", "bincode", "google-cloud-auth", - "google-cloud-default", "google-cloud-storage", "http", "metrics", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zksync_config", "zksync_types", @@ -6545,7 +5921,7 @@ dependencies = [ "chrono", "ctrlc", "ethabi", - "futures 0.3.27", + "futures 0.3.28", "hex", "local-ip-address", "metrics", @@ -6557,7 +5933,7 @@ dependencies = [ "serde_json", "setup_key_generator_and_server", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zkevm_test_harness", "zksync_circuit_breaker", @@ -6566,7 +5942,6 @@ dependencies = [ "zksync_eth_client", "zksync_object_store", "zksync_prover_utils", - "zksync_types", "zksync_utils", "zksync_verification_key_generator_and_server", ] @@ -6576,42 +5951,16 @@ name = "zksync_prover_utils" version = "1.0.0" dependencies = [ "ctrlc", - "futures 0.3.27", + "futures 0.3.28", "metrics", "regex", "reqwest", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zksync_config", 
"zksync_utils", ] -[[package]] -name = "zksync_state" -version = "1.0.0" -dependencies = [ - "metrics", - "vlog", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_storage" -version = "1.0.0" -dependencies = [ - "bincode", - "byteorder", - "num_cpus", - "once_cell", - "rocksdb", - "serde", - "vlog", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_types" version = "1.0.0" @@ -6620,20 +5969,16 @@ dependencies = [ "blake2 0.10.6", "chrono", "codegen 0.1.0", - "ethbloom", - "hex", "metrics", "num 0.3.1", "once_cell", "parity-crypto", - "rayon", "rlp", "serde", "serde_json", "serde_with", "strum", "thiserror", - "tiny-keccak 1.5.0", "zk_evm", "zkevm-assembly", "zkevm_test_harness", @@ -6651,14 +5996,15 @@ dependencies = [ "anyhow", "bigdecimal", "envy", - "futures 0.3.27", + "futures 0.3.28", "hex", "itertools", + "metrics", "num 0.3.1", "reqwest", "serde", "thiserror", - "tokio 1.28.0", + "tokio 1.28.2", "vlog", "zk_evm", "zksync_basic_types", @@ -6679,18 +6025,3 @@ dependencies = [ "vlog", "zksync_types", ] - -[[package]] -name = "zksync_web3_decl" -version = "1.0.0" -dependencies = [ - "bigdecimal", - "chrono", - "itertools", - "jsonrpsee", - "rlp", - "serde", - "serde_json", - "thiserror", - "zksync_types", -] diff --git a/core/bin/prover/Cargo.toml b/core/bin/prover/Cargo.toml index 8cbc06e73b92..2b2da5696cde 100644 --- a/core/bin/prover/Cargo.toml +++ b/core/bin/prover/Cargo.toml @@ -4,14 +4,13 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] publish = false # We don't want to publish our binaries. 
[dependencies] -zksync_types = { path = "../../lib/types", version = "1.0" } zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } zksync_utils = {path = "../../lib/utils", version = "1.0" } @@ -25,10 +24,10 @@ zksync_object_store = { path = "../../lib/object_store", version = "1.0" } setup_key_generator_and_server = { path = "../setup_key_generator_and_server", version = "1.0" } -api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} +api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.3", features=["gpu"], default-features=false} +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.3", features=["gpu"], default-features=false} -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} +zkevm_test_harness = { git = "https://github.com/matter-labs/zkevm_test_harness.git", branch = "v1.3.3"} tokio = { version = "1", features = ["time"] } diff --git a/core/bin/prover/src/main.rs b/core/bin/prover/src/main.rs index 74938dd98426..110260630a9b 100644 --- a/core/bin/prover/src/main.rs +++ b/core/bin/prover/src/main.rs @@ -2,23 +2,26 @@ use std::env; use std::sync::{Arc, Mutex}; use api::gpu_prover; -use futures::future; use local_ip_address::local_ip; use prover_service::run_prover::run_prover_with_remote_synthesizer; use queues::Buffer; use tokio::{sync::oneshot, task::JoinHandle}; use zksync_circuit_breaker::{vks::VksChecker, CircuitBreakerChecker}; -use zksync_config::configs::prover_group::ProverGroupConfig; use zksync_config::{ - configs::api::Prometheus as PrometheusConfig, ApiConfig, ProverConfig, ProverConfigs, - ZkSyncConfig, + configs::chain::CircuitBreakerConfig, + configs::{api::PrometheusConfig, prover_group::ProverGroupConfig, AlertsConfig}, + ApiConfig, ContractsConfig, ETHClientConfig, ProverConfig, ProverConfigs, }; -use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; -use zksync_dal::ConnectionPool; -use zksync_eth_client::clients::http::PKSigningClient; +use zksync_dal::{ + connection::DbVariant, + gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}, + ConnectionPool, +}; +use zksync_eth_client::clients::http::QueryClient; use zksync_object_store::ObjectStoreFactory; use zksync_prover_utils::region_fetcher::{get_region, get_zone}; +use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::artifact_provider::ProverArtifactProvider; use crate::prover::ProverReporter; @@ -32,33 +35,19 @@ mod prover_params; mod socket_listener; mod synthesized_circuit_provider; -pub async fn wait_for_tasks(task_futures: Vec<JoinHandle<()>>) { - match future::select_all(task_futures).await.0 { - Ok(_) => { - graceful_shutdown().await; - vlog::info!("One of the actors finished its run, while it wasn't expected to do it"); - } - Err(error) => { - graceful_shutdown().await; - vlog::info!( - "One of the tokio actors unexpectedly finished with error: {:?}", - error - ); - } - } -} - async fn graceful_shutdown() { - let pool = ConnectionPool::new(Some(1), true); + let pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; let host = local_ip().expect("Failed obtaining local IP address"); let port =
ProverConfigs::from_env().non_gpu.assembly_receiver_port; let region = get_region().await; let zone = get_zone().await; let address = SocketAddress { host, port }; pool.clone() - .access_storage_blocking() + .access_storage() + .await .gpu_prover_queue_dal() - .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0, region, zone); + .update_prover_instance_status(address, GpuProverInstanceStatus::Dead, 0, region, zone) + .await; } fn get_ram_per_gpu() -> u64 { @@ -104,21 +93,15 @@ fn get_prover_config_for_machine_type() -> (ProverConfig, u8) { #[tokio::main] async fn main() { - let sentry_guard = vlog::init(); - let config = ZkSyncConfig::from_env(); + vlog::init(); + vlog::trace!("starting prover"); let (prover_config, num_gpu) = get_prover_config_for_machine_type(); + let prometheus_config = PrometheusConfig { listener_port: prover_config.prometheus_port, ..ApiConfig::from_env().prometheus }; - match sentry_guard { - Some(_) => vlog::info!( - "Starting Sentry url: {}", - std::env::var("MISC_SENTRY_URL").unwrap(), - ), - None => vlog::info!("No sentry url configured"), - } let region = get_region().await; let zone = get_zone().await; @@ -136,19 +119,24 @@ async fn main() { &prover_config.key_download_url, ); env::set_var("CRS_FILE", prover_config.initial_setup_key_path.clone()); - - let eth_client = PKSigningClient::from_config(&config); + vlog::trace!("initial setup keys loaded, preparing eth_client + circuit breaker"); + let eth_client_config = ETHClientConfig::from_env(); + let circuit_breaker_config = CircuitBreakerConfig::from_env(); + let eth_client = QueryClient::new(&eth_client_config.web3_url).unwrap(); + let contracts_config = ContractsConfig::from_env(); let circuit_breaker_checker = CircuitBreakerChecker::new( vec![Box::new(VksChecker::new( - &config.chain.circuit_breaker, + &circuit_breaker_config, eth_client, + contracts_config.diamond_proxy_addr, ))], - &config.chain.circuit_breaker, + &circuit_breaker_config, ); circuit_breaker_checker .check() .await .expect("Circuit breaker triggered"); + let (cb_sender, cb_receiver) = futures::channel::oneshot::channel(); // We don't have a graceful shutdown process for the prover, so `_stop_sender` is unused. // Though we still need to create a channel because the circuit breaker expects `stop_receiver`.
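For context on the channel plumbing above: the circuit breaker reports a tripped condition through the `cb_sender`/`cb_receiver` oneshot pair, and `main` later races that receiver against the worker tasks and the stop signal. A minimal, self-contained sketch of this select-against-a-breaker pattern, with made-up task bodies and messages (an illustration under simplified assumptions, not code from this patch):

    use std::time::Duration;
    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        // Breaker half: a background check that fires at most once.
        let (cb_sender, cb_receiver) = oneshot::channel::<String>();
        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(50)).await;
            let _ = cb_sender.send("verification keys mismatch".to_string());
        });

        // Worker half: stands in for the long-running prover tasks.
        let worker = tokio::spawn(async {
            tokio::time::sleep(Duration::from_secs(3600)).await;
        });

        tokio::select! {
            _ = worker => println!("worker finished, which was not expected"),
            reason = cb_receiver => {
                if let Ok(msg) = reason {
                    println!("circuit breaker tripped, shutting down: {msg}");
                }
            }
        }
    }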
@@ -165,8 +153,8 @@ async fn main() { let mut tasks: Vec> = vec![]; tasks.push(prometheus_exporter::run_prometheus_exporter( - prometheus_config, - true, + prometheus_config.listener_port, + None, )); tasks.push(tokio::spawn( circuit_breaker_checker.run(cb_sender, stop_receiver), @@ -181,31 +169,34 @@ async fn main() { host: local_ip, port: prover_config.assembly_receiver_port, }; - let synthesized_circuit_provider = SynthesizedCircuitProvider::new( - consumer, - ConnectionPool::new(Some(1), true), - address, - region.clone(), - zone.clone(), - ); vlog::info!("local IP address is: {:?}", local_ip); tasks.push(tokio::task::spawn(incoming_socket_listener( local_ip, prover_config.assembly_receiver_port, producer, - ConnectionPool::new(Some(1), true), + ConnectionPool::new(Some(1), DbVariant::Prover).await, prover_config.specialized_prover_group_id, - region, - zone, + region.clone(), + zone.clone(), num_gpu, ))); let params = ProverParams::new(&prover_config); let store_factory = ObjectStoreFactory::from_env(); - let prover_job_reporter = ProverReporter::new(prover_config, &store_factory); + let circuit_provider_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; tasks.push(tokio::task::spawn_blocking(move || { + let rt_handle = tokio::runtime::Handle::current(); + let synthesized_circuit_provider = SynthesizedCircuitProvider::new( + consumer, + circuit_provider_pool, + address, + region, + zone, + rt_handle.clone(), + ); + let prover_job_reporter = ProverReporter::new(prover_config, &store_factory, rt_handle); run_prover_with_remote_synthesizer( synthesized_circuit_provider, ProverArtifactProvider, @@ -215,15 +206,29 @@ async fn main() { ) })); + let particular_crypto_alerts = Some(AlertsConfig::from_env().sporadic_crypto_errors_substrs); + let graceful_shutdown = Some(graceful_shutdown()); + let tasks_allowed_to_finish = false; tokio::select! { - _ = wait_for_tasks(tasks) => {}, + _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = stop_signal_receiver => { vlog::info!("Stop signal received, shutting down"); + + // BEWARE, HERE BE DRAGONS. + // This is necessary because of the blocking prover. See the end of the function for more details. + std::process::exit(0); }, error = cb_receiver => { if let Ok(error_msg) = error { vlog::warn!("Circuit breaker received, shutting down. Reason: {}", error_msg); } }, - }; + } + + // BEWARE, HERE BE DRAGONS. + // The process hangs here if we panic outside `run_prover_with_remote_synthesizer`. + // Given the task is spawned as blocking, it's in a different thread that can't be cancelled on demand.
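+ // For illustration only (hypothetical snippet, not from this patch): once a blocking
+ // closure has started, aborting its `JoinHandle` does not stop the underlying thread:
+ //     let handle = tokio::task::spawn_blocking(|| loop { /* CPU-bound proving */ });
+ //     handle.abort(); // no effect; the blocking thread keeps running
+ // so the only reliable way out here is the process-level exit below.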
+ // See: https://docs.rs/tokio/latest/tokio/task/fn.spawn_blocking.html for more information + // Follow [PR](https://github.com/matter-labs/zksync-2-dev/pull/2129) for the logic behind it + std::process::exit(-1); } diff --git a/core/bin/prover/src/prover.rs b/core/bin/prover/src/prover.rs index 00474aca669c..e3d2aaf56e0a 100644 --- a/core/bin/prover/src/prover.rs +++ b/core/bin/prover/src/prover.rs @@ -2,15 +2,17 @@ use std::{env, time::Duration}; use prover_service::JobResult::{Failure, ProofGenerated}; use prover_service::{JobReporter, JobResult}; +use tokio::runtime::Handle; use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; use zkevm_test_harness::pairing::bn256::Bn256; use zksync_config::ProverConfig; -use zksync_dal::ConnectionPool; +use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory}; #[derive(Debug)] pub struct ProverReporter { + rt_handle: Handle, pool: ConnectionPool, config: ProverConfig, processed_by: String, @@ -22,12 +24,18 @@ fn assembly_debug_blob_url(job_id: usize, circuit_id: u8) -> String { } impl ProverReporter { - pub(crate) fn new(config: ProverConfig, store_factory: &ObjectStoreFactory) -> Self { + pub(crate) fn new( + config: ProverConfig, + store_factory: &ObjectStoreFactory, + rt_handle: Handle, + ) -> Self { + let pool = rt_handle.block_on(ConnectionPool::new(Some(1), DbVariant::Prover)); Self { - pool: ConnectionPool::new(Some(1), true), + pool, config, processed_by: env::var("POD_NAME").unwrap_or("Unknown".to_string()), - object_store: store_factory.create_store(), + object_store: rt_handle.block_on(store_factory.create_store()), + rt_handle, } } @@ -54,38 +62,34 @@ impl ProverReporter { "circuit_type" => circuit_type, ); let job_id = job_id as u32; - let mut connection = self.pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); + self.rt_handle.block_on(async { + let mut connection = self.pool.access_storage().await; + let mut transaction = connection.start_transaction().await; - transaction - .prover_dal() - .save_proof(job_id, duration, serialized, &self.processed_by); - let prover_job_metadata = transaction - .prover_dal() - .get_prover_job_by_id(job_id) - .unwrap_or_else(|| panic!("No job with id: {} exist", job_id)); + transaction + .prover_dal() + .save_proof(job_id, duration, serialized, &self.processed_by) + .await; + let _prover_job_metadata = transaction + .prover_dal() + .get_prover_job_by_id(job_id) + .await + .unwrap_or_else(|| panic!("No job with id: {} exists", job_id)); - if prover_job_metadata.aggregation_round.next().is_none() { - let block = transaction - .blocks_dal() - .get_block_header(prover_job_metadata.block_number) - .unwrap(); - metrics::counter!( - "server.processed_txs", - block.tx_count() as u64, - "stage" => "prove_generated" - ); - } - transaction.commit_blocking(); + transaction.commit().await; + }); } fn get_circuit_type(&self, job_id: usize) -> String { - let prover_job_metadata = self - .pool - .access_storage_blocking() - .prover_dal() - .get_prover_job_by_id(job_id as u32) - .unwrap_or_else(|| panic!("No job with id: {} exist", job_id)); + let prover_job_metadata = self.rt_handle.block_on(async { + self.pool + .access_storage() + .await + .prover_dal() + .get_prover_job_by_id(job_id as u32) + .await + .unwrap_or_else(|| panic!("No job with id: {} exists", job_id)) + }); prover_job_metadata.circuit_type } } @@ -99,10 +103,14 @@ impl JobReporter for ProverReporter { job_id, error ); -
self.pool - .access_storage_blocking() - .prover_dal() - .save_proof_error(job_id as u32, error, self.config.max_attempts); + self.rt_handle.block_on(async { + self.pool + .access_storage() + .await + .prover_dal() + .save_proof_error(job_id as u32, error, self.config.max_attempts) + .await; + }); } ProofGenerated(job_id, duration, proof, index) => { self.handle_successful_proof_generation(job_id, proof, duration, index); @@ -194,8 +202,11 @@ impl JobReporter for ProverReporter { error, ); let blob_url = assembly_debug_blob_url(job_id, circuit_id); - self.object_store - .put_raw(Bucket::ProverJobs, &blob_url, assembly) + let put_task = self + .object_store + .put_raw(Bucket::ProverJobs, &blob_url, assembly); + self.rt_handle + .block_on(put_task) .expect("Failed saving debug assembly to GCS"); } JobResult::AssemblyTransferred(job_id, duration) => { diff --git a/core/bin/prover/src/socket_listener.rs b/core/bin/prover/src/socket_listener.rs index 4da5f4a9e9ba..1a4a1a567c69 100644 --- a/core/bin/prover/src/socket_listener.rs +++ b/core/bin/prover/src/socket_listener.rs @@ -1,11 +1,12 @@ use crate::synthesized_circuit_provider::SharedAssemblyQueue; use queues::IsQueue; -use std::io::copy; -use std::net::{IpAddr, SocketAddr, TcpListener, TcpStream}; +use std::net::{IpAddr, SocketAddr}; use std::time::Instant; use zksync_dal::gpu_prover_queue_dal::{GpuProverInstanceStatus, SocketAddress}; use zksync_dal::ConnectionPool; +use tokio::{io::copy, net::{TcpListener, TcpStream}}; + #[allow(clippy::too_many_arguments)] pub async fn incoming_socket_listener( host: IpAddr, @@ -24,29 +25,38 @@ pub async fn incoming_socket_listener( port ); let listener = TcpListener::bind(listening_address) + .await .unwrap_or_else(|_| panic!("Failed binding address: {:?}", listening_address)); let address = SocketAddress { host, port }; - pool.access_storage_blocking() + let queue_capacity = queue.lock().unwrap().capacity(); + pool.access_storage() + .await .gpu_prover_queue_dal() .insert_prover_instance( address.clone(), - queue.lock().unwrap().capacity(), + queue_capacity, specialized_prover_group_id, region.clone(), zone.clone(), num_gpu, - ); + ) + .await; let mut now = Instant::now(); - for stream in listener.incoming() { + loop { + let stream = match listener.accept().await { + Ok(stream) => stream.0, + Err(e) => { + panic!("could not accept connection: {e:?}"); + } + }; vlog::trace!( "Received new assembly send connection, waited for {}ms.", now.elapsed().as_millis() ); - let stream = stream.expect("Stream closed early"); handle_incoming_file( stream, queue.clone(), @@ -54,13 +64,14 @@ pub async fn incoming_socket_listener( address.clone(), region.clone(), zone.clone(), - ); + ) + .await; now = Instant::now(); } } -fn handle_incoming_file( +async fn handle_incoming_file( mut stream: TcpStream, queue: SharedAssemblyQueue, pool: ConnectionPool, @@ -70,31 +81,31 @@ fn handle_incoming_file( ) { let mut assembly: Vec = vec![]; let started_at = Instant::now(); - copy(&mut stream, &mut assembly).expect("Failed reading from stream"); + copy(&mut stream, &mut assembly).await.expect("Failed reading from stream"); let file_size_in_gb = assembly.len() / (1024 * 1024 * 1024); vlog::trace!( "Read file of size: {}GB from stream took: {} seconds", file_size_in_gb, started_at.elapsed().as_secs() ); - let mut assembly_queue = queue.lock().unwrap(); + let (queue_free_slots, status) = { + let mut assembly_queue = queue.lock().unwrap(); - assembly_queue - .add(assembly) - .expect("Failed saving assembly to queue"); - let status = if 
assembly_queue.capacity() == assembly_queue.size() { - GpuProverInstanceStatus::Full - } else { - GpuProverInstanceStatus::Available + assembly_queue + .add(assembly) + .expect("Failed saving assembly to queue"); + let status = if assembly_queue.capacity() == assembly_queue.size() { + GpuProverInstanceStatus::Full + } else { + GpuProverInstanceStatus::Available + }; + let queue_free_slots = assembly_queue.capacity() - assembly_queue.size(); + (queue_free_slots, status) }; - pool.access_storage_blocking() + pool.access_storage() + .await .gpu_prover_queue_dal() - .update_prover_instance_status( - address, - status, - assembly_queue.capacity() - assembly_queue.size(), - region, - zone, - ); + .update_prover_instance_status(address, status, queue_free_slots, region, zone) + .await; } diff --git a/core/bin/prover/src/synthesized_circuit_provider.rs b/core/bin/prover/src/synthesized_circuit_provider.rs index 1424ee9e7a24..1a9ac811369d 100644 --- a/core/bin/prover/src/synthesized_circuit_provider.rs +++ b/core/bin/prover/src/synthesized_circuit_provider.rs @@ -5,12 +5,14 @@ use std::sync::{Arc, Mutex}; use prover_service::RemoteSynthesizer; use queues::{Buffer, IsQueue}; +use tokio::runtime::Handle; use zksync_dal::gpu_prover_queue_dal::SocketAddress; use zksync_dal::ConnectionPool; pub type SharedAssemblyQueue = Arc>>>; pub struct SynthesizedCircuitProvider { + rt_handle: Handle, queue: SharedAssemblyQueue, pool: ConnectionPool, address: SocketAddress, @@ -25,8 +27,10 @@ impl SynthesizedCircuitProvider { address: SocketAddress, region: String, zone: String, + rt_handle: Handle, ) -> Self { Self { + rt_handle, queue, pool, address, @@ -44,15 +48,19 @@ impl RemoteSynthesizer for SynthesizedCircuitProvider { Ok(blob) => { let queue_free_slots = assembly_queue.capacity() - assembly_queue.size(); if is_full { - self.pool - .access_storage_blocking() - .gpu_prover_queue_dal() - .update_prover_instance_from_full_to_available( - self.address.clone(), - queue_free_slots, - self.region.clone(), - self.zone.clone(), - ); + self.rt_handle.block_on(async { + self.pool + .access_storage() + .await + .gpu_prover_queue_dal() + .update_prover_instance_from_full_to_available( + self.address.clone(), + queue_free_slots, + self.region.clone(), + self.zone.clone(), + ) + .await + }); } vlog::trace!( "Queue free slot {} for capacity {}", diff --git a/core/bin/prover_fri/Cargo.toml b/core/bin/prover_fri/Cargo.toml new file mode 100644 index 000000000000..3de420d2cdb7 --- /dev/null +++ b/core/bin/prover_fri/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "zksync_prover_fri" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_dal = { path = "../../lib/dal", version = "1.0" } +zksync_config = { path = "../../lib/config", version = "1.0" } +prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } +vlog = { path = "../../lib/vlog", version = "1.0" } +zksync_object_store = { path = "../../lib/object_store", version = "1.0" } +zksync_prover_utils = {path = "../../lib/prover_utils", version = "1.0" } +zksync_queued_job_processor = { path = "../../lib/queued_job_processor", version = "1.0" } +zksync_witness_generator = { path = "../witness_generator", version = "1.0" } +zksync_utils = { path = "../../lib/utils", version = "1.0" } +vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" 
} + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } +circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]} + +tokio = { version = "1", features = ["time"] } +futures = { version = "0.3", features = ["compat"] } +ctrlc = { version = "3.1", features = ["termination"] } +metrics = "0.20.0" diff --git a/core/bin/prover_fri/src/main.rs b/core/bin/prover_fri/src/main.rs new file mode 100644 index 000000000000..31972899cde5 --- /dev/null +++ b/core/bin/prover_fri/src/main.rs @@ -0,0 +1,128 @@ +#![feature(generic_const_exprs)] + +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::oneshot; +use zksync_vk_setup_data_server_fri::{get_setup_data_for_circuit_type, ProverServiceDataKey}; + +use zksync_config::configs::fri_prover_group::{CircuitIdRoundTuple, FriProverGroupConfig}; +use zksync_config::configs::{FriProverConfig, PrometheusConfig}; +use zksync_config::{ApiConfig, ObjectStoreConfig}; +use zksync_dal::connection::DbVariant; +use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStoreFactory; +use zksync_queued_job_processor::JobProcessor; +use zksync_utils::wait_for_tasks::wait_for_tasks; + +use crate::prover_job_processor::{GoldilocksProverSetupData, Prover, SetupLoadMode}; + +mod prover_job_processor; + +#[tokio::main] +async fn main() { + vlog::init(); + let sentry_guard = vlog::init_sentry(); + let prover_config = FriProverConfig::from_env(); + let prometheus_config = PrometheusConfig { + listener_port: prover_config.prometheus_port, + ..ApiConfig::from_env().prometheus + }; + + match sentry_guard { + Some(_) => vlog::info!( + "Starting Sentry url: {}", + std::env::var("MISC_SENTRY_URL").unwrap(), + ), + None => vlog::info!("No sentry url configured"), + } + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .expect("Error setting Ctrl+C handler"); + + let (stop_sender, stop_receiver) = tokio::sync::watch::channel(false); + let blob_store = ObjectStoreFactory::from_env().create_store().await; + let public_blob_store = ObjectStoreFactory::new(ObjectStoreConfig::public_from_env()) + .create_store() + .await; + + vlog::info!("Starting FRI proof generation"); + let pool = ConnectionPool::new(None, DbVariant::Prover).await; + let circuit_ids_for_round_to_be_proven = FriProverGroupConfig::from_env() + .get_circuit_ids_for_group_id(prover_config.specialized_group_id) + .unwrap_or(vec![]); + + let setup_load_mode = build_prover_setup_load_mode_using_config(&prover_config); + let prover = Prover::new( + blob_store, + public_blob_store, + FriProverConfig::from_env(), + pool, + setup_load_mode, + circuit_ids_for_round_to_be_proven, + ); + let tasks = vec![ + prometheus_exporter::run_prometheus_exporter(prometheus_config.listener_port, None), + tokio::spawn(prover.run(stop_receiver, None)), + ]; + + let particular_crypto_alerts = None; + let graceful_shutdown = None::>; + let tasks_allowed_to_finish = false; + tokio::select! 
{ + _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, + _ = stop_signal_receiver => { + vlog::info!("Stop signal received, shutting down"); + }, + } + + stop_sender.send(true).ok(); +} + +fn build_prover_setup_load_mode_using_config(config: &FriProverConfig) -> SetupLoadMode { + match config.setup_load_mode { + zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, + zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { + let cache = load_setup_data_cache(config.specialized_group_id); + SetupLoadMode::FromMemory(cache) + } + } +} + +fn load_setup_data_cache( + specialized_group_id: u8, +) -> HashMap> { + vlog::info!( + "Loading setup data cache for group {}", + specialized_group_id + ); + let prover_setup_metadata_list = FriProverGroupConfig::from_env() + .get_circuit_ids_for_group_id(specialized_group_id) + .expect( + "At least one circuit should be configured for group when running in FromMemory mode", + ); + vlog::info!( + "for group {} configured setup metadata are {:?}", + specialized_group_id, + prover_setup_metadata_list + ); + let mut cache = HashMap::new(); + for prover_setup_metadata in prover_setup_metadata_list { + let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); + let setup_data = get_setup_data_for_circuit_type(key.clone()); + cache.insert(key, Arc::new(setup_data)); + } + cache +} + +fn setup_metadata_to_setup_data_key(setup_metadata: &CircuitIdRoundTuple) -> ProverServiceDataKey { + ProverServiceDataKey { + circuit_id: setup_metadata.circuit_id, + round: setup_metadata.aggregation_round.into(), + } +} diff --git a/core/bin/prover_fri/src/prover_job_processor.rs b/core/bin/prover_fri/src/prover_job_processor.rs new file mode 100644 index 000000000000..58840a537207 --- /dev/null +++ b/core/bin/prover_fri/src/prover_job_processor.rs @@ -0,0 +1,399 @@ +use std::collections::HashMap; +use std::{sync::Arc, time::Instant}; +use tokio::task::JoinHandle; + +use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; +use circuit_definitions::boojum::algebraic_props::sponge::GenericAlgebraicSponge; +use circuit_definitions::boojum::cs::implementations::pow::NoPow; +use circuit_definitions::boojum::implementations::poseidon2::Poseidon2Goldilocks; +use circuit_definitions::boojum::worker::Worker; +use circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof, +}; +use circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit, +}; +use circuit_definitions::{ + base_layer_proof_config, recursion_layer_proof_config, ZkSyncDefaultRoundFunction, +}; +use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; +use zkevm_test_harness::prover_utils::{ + prove_base_layer_circuit, prove_recursion_layer_circuit, verify_base_layer_proof, + verify_recursion_layer_proof, +}; +use zksync_config::configs::FriProverConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::{FriCircuitKey, ObjectStore}; +use zksync_queued_job_processor::{async_trait, JobProcessor}; +use zksync_types::L1BatchNumber; + +use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; +use zksync_vk_setup_data_server_fri::{ + get_setup_data_for_circuit_type, ProverServiceDataKey, ProverSetupData, +}; +use zksync_witness_utils::{ + get_base_layer_circuit_id_for_recursive_layer, 
CircuitWrapper, FriProofWrapper, +}; + +pub type GoldilocksProverSetupData = ProverSetupData< + GoldilocksField, + GoldilocksField, + GenericAlgebraicSponge< + GoldilocksField, + GoldilocksField, + 8, + 12, + 4, + Poseidon2Goldilocks, + AbsorptionModeOverwrite, + >, +>; + +pub enum SetupLoadMode { + FromMemory(HashMap>), + FromDisk, +} + +pub struct Prover { + blob_store: Box, + public_blob_store: Box, + config: Arc, + prover_connection_pool: ConnectionPool, + setup_load_mode: SetupLoadMode, + // Only pick jobs for the configured circuit id and aggregation rounds. + // Empty means all jobs are picked. + circuit_ids_for_round_to_be_proven: Vec, +} + +impl Prover { + pub fn new( + blob_store: Box, + public_blob_store: Box, + config: FriProverConfig, + prover_connection_pool: ConnectionPool, + setup_load_mode: SetupLoadMode, + circuit_ids_for_round_to_be_proven: Vec, + ) -> Self { + Prover { + blob_store, + public_blob_store, + config: Arc::new(config), + prover_connection_pool, + setup_load_mode, + circuit_ids_for_round_to_be_proven, + } + } + + fn get_setup_data(&self, key: ProverServiceDataKey) -> Arc { + match &self.setup_load_mode { + SetupLoadMode::FromMemory(cache) => cache + .get(&key) + .expect("Setup data not found in cache") + .clone(), + SetupLoadMode::FromDisk => { + let started_at = Instant::now(); + let artifact: GoldilocksProverSetupData = + get_setup_data_for_circuit_type(key.clone()); + metrics::histogram!( + "prover_fri.prover.setup_data_load_time", + started_at.elapsed(), + "circuit_type" => key.circuit_id.to_string(), + ); + Arc::new(artifact) + } + } + } + + fn prove( + job: ProverJob, + config: Arc, + setup_data: Arc, + ) -> ProverArtifacts { + let proof = match job.circuit_wrapper { + CircuitWrapper::Base(base_circuit) => { + Self::prove_base_layer(job.job_id, base_circuit, config, setup_data) + } + CircuitWrapper::Recursive(recursive_circuit) => { + Self::prove_recursive_layer(job.job_id, recursive_circuit, config, setup_data) + } + }; + ProverArtifacts::new(job.block_number, proof) + } + + fn prove_recursive_layer( + job_id: u32, + circuit: ZkSyncRecursiveLayerCircuit, + config: Arc, + artifact: Arc, + ) -> FriProofWrapper { + let worker = Worker::new(); + let circuit_id = circuit.numeric_circuit_type(); + let started_at = Instant::now(); + let proof = prove_recursion_layer_circuit::( + circuit.clone(), + &worker, + recursion_layer_proof_config(), + &artifact.setup_base, + &artifact.setup, + &artifact.setup_tree, + &artifact.vk, + &artifact.vars_hint, + &artifact.wits_hint, + &artifact.finalization_hint, + ); + metrics::histogram!( + "prover_fri.prover.proof_generation_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string(), + "layer" => "recursive", + ); + if config + .recursive_layer_circuit_ids_to_be_verified + .contains(&circuit_id) + { + let started_at = Instant::now(); + let is_valid = verify_recursion_layer_proof::(&circuit, &proof, &artifact.vk); + metrics::histogram!( + "prover_fri.prover.proof_verification_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string(), + "layer" => "recursive", + ); + if !is_valid { + vlog::error!( + "Failed to verify recursive layer proof for job-id: {} circuit type: {}", + job_id, + circuit_id + ); + } + } + FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) + } + + fn prove_base_layer( + job_id: u32, + circuit: ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle, + ZkSyncDefaultRoundFunction, + >, + config: Arc, + artifact: Arc, + ) -> 
FriProofWrapper { + let worker = Worker::new(); + let circuit_id = circuit.numeric_circuit_type(); + let started_at = Instant::now(); + let proof = prove_base_layer_circuit::( + circuit.clone(), + &worker, + base_layer_proof_config(), + &artifact.setup_base, + &artifact.setup, + &artifact.setup_tree, + &artifact.vk, + &artifact.vars_hint, + &artifact.wits_hint, + &artifact.finalization_hint, + ); + metrics::histogram!( + "prover_fri.prover.proof_generation_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string(), + "layer" => "base", + ); + if config + .base_layer_circuit_ids_to_be_verified + .contains(&circuit_id) + { + let started_at = Instant::now(); + let is_valid = verify_base_layer_proof::(&circuit, &proof, &artifact.vk); + metrics::histogram!( + "prover_fri.prover.proof_verification_time", + started_at.elapsed(), + "circuit_type" => circuit_id.to_string(), + "layer" => "base", + ); + if !is_valid { + vlog::error!( + "Failed to verify base layer proof for job-id: {} circuit_type {}", + job_id, + circuit_id + ); + } + } + FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) + } +} + +pub struct ProverJob { + block_number: L1BatchNumber, + job_id: u32, + circuit_wrapper: CircuitWrapper, + setup_data_key: ProverServiceDataKey, +} + +impl ProverJob { + fn new( + block_number: L1BatchNumber, + job_id: u32, + circuit_wrapper: CircuitWrapper, + setup_data_key: ProverServiceDataKey, + ) -> Self { + Self { + block_number, + job_id, + circuit_wrapper, + setup_data_key, + } + } +} + +pub struct ProverArtifacts { + block_number: L1BatchNumber, + proof_wrapper: FriProofWrapper, +} + +impl ProverArtifacts { + fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self { + Self { + block_number, + proof_wrapper, + } + } +} + +#[async_trait] +impl JobProcessor for Prover { + type Job = ProverJob; + type JobId = u32; + type JobArtifacts = ProverArtifacts; + const SERVICE_NAME: &'static str = "FriProver"; + + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut storage = self.prover_connection_pool.access_storage().await; + let prover_job = match self.circuit_ids_for_round_to_be_proven.is_empty() { + false => { + // Specialized prover: proving subset of configured circuits. + storage + .fri_prover_jobs_dal() + .get_next_job_for_circuit_id_round(&self.circuit_ids_for_round_to_be_proven) + .await + } + true => { + // Generalized prover: proving all circuits. 
+ storage.fri_prover_jobs_dal().get_next_job().await + } + }?; + vlog::info!("Started processing prover job: {:?}", prover_job); + + let circuit_key = FriCircuitKey { + block_number: prover_job.block_number, + sequence_number: prover_job.sequence_number, + circuit_id: prover_job.circuit_id, + aggregation_round: prover_job.aggregation_round, + depth: prover_job.depth, + }; + let started_at = Instant::now(); + let input = self + .blob_store + .get(circuit_key) + .await + .unwrap_or_else(|err| panic!("{err:?}")); + metrics::histogram!( + "prover_fri.prover.blob_fetch_time", + started_at.elapsed(), + "circuit_type" => prover_job.circuit_id.to_string(), + "aggregation_round" => format!("{:?}", prover_job.aggregation_round), + ); + let setup_data_key = ProverServiceDataKey { + circuit_id: prover_job.circuit_id, + round: prover_job.aggregation_round, + }; + + Some(( + prover_job.id, + ProverJob::new( + prover_job.block_number, + prover_job.id, + input, + setup_data_key, + ), + )) + } + + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { + self.prover_connection_pool + .access_storage() + .await + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + } + + async fn process_job( + &self, + job: Self::Job, + _started_at: Instant, + ) -> JoinHandle { + let config = Arc::clone(&self.config); + let setup_data = self.get_setup_data(job.setup_data_key.clone()); + tokio::task::spawn_blocking(move || Self::prove(job, config, setup_data)) + } + + async fn save_result( + &self, + job_id: Self::JobId, + started_at: Instant, + artifacts: Self::JobArtifacts, + ) { + vlog::info!( + "Successfully proven job: {}, took: {:?}", + job_id, + started_at.elapsed() + ); + let proof = artifacts.proof_wrapper; + + // We save the scheduler proofs in the public bucket, + // so that they can be verified independently while we're doing shadow proving + let circuit_type = match &proof { + FriProofWrapper::Base(base) => base.numeric_circuit_type(), + FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { + ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { + self.public_blob_store + .put(artifacts.block_number.0, &proof) + .await + .unwrap(); + recursive_circuit.numeric_circuit_type() + } + _ => recursive_circuit.numeric_circuit_type(), + }, + }; + + let blob_save_started_at = Instant::now(); + let blob_url = self.blob_store.put(job_id, &proof).await.unwrap(); + metrics::histogram!( + "prover_fri.prover.blob_save_time", + blob_save_started_at.elapsed(), + "circuit_type" => circuit_type.to_string(), + ); + + let mut prover_connection = self.prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; + let job_metadata = transaction + .fri_prover_jobs_dal() + .save_proof(job_id, started_at.elapsed(), &blob_url) + .await; + if job_metadata.is_node_final_proof { + transaction + .fri_scheduler_dependency_tracker_dal() + .set_final_prover_job_id_for_l1_batch( + get_base_layer_circuit_id_for_recursive_layer(job_metadata.circuit_id), + job_id, + job_metadata.block_number, + ) + .await; + } + transaction.commit().await; + } +} diff --git a/core/bin/setup_key_generator_and_server/Cargo.lock b/core/bin/setup_key_generator_and_server/Cargo.lock index cf2cbde931d9..e29e17309268 100644 --- a/core/bin/setup_key_generator_and_server/Cargo.lock +++ b/core/bin/setup_key_generator_and_server/Cargo.lock @@ -77,20 +77,26 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum =
"fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -111,14 +117,14 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "api" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bellman_ce", "cfg-if 1.0.0", @@ -144,15 +150,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -171,15 +177,15 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "8868f09ff8cea88b079da74ae569d9b8c62a23c68c746240b704ee6f7525c89c" [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -188,24 +194,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.52", - 
"quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -263,9 +269,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -278,7 +284,7 @@ name = "bellman_ce" version = "0.3.2" source = "git+https://github.com/matter-labs/bellman?branch=dev#bbac0559fdc440b2331eca1c347a30559a3dd969" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bit-vec", "blake2s_const", "blake2s_simd", @@ -332,8 +338,8 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "regex", "rustc-hash", "shlex", @@ -357,9 +363,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" [[package]] name = "bitvec" @@ -390,7 +396,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -462,9 +468,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -519,13 +525,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "rustc-serialize", "serde", @@ -553,9 +559,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -611,16 +617,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "const-oid" version = "0.9.2" @@ -651,15 +647,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] @@ -685,11 +681,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-epoch 0.9.14", + "crossbeam-epoch 0.9.15", "crossbeam-queue 0.3.8", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -704,12 +700,12 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -730,8 +726,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.14", - "crossbeam-utils 0.8.15", + "crossbeam-epoch 0.9.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -751,14 +747,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", - "memoffset 0.8.0", + "crossbeam-utils 0.8.16", + "memoffset 0.9.0", "scopeguard", ] @@ -780,7 +776,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", ] [[package]] @@ -796,9 +792,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -854,11 +850,11 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] @@ -872,50 +868,6 @@ dependencies = [ "cipher", ] -[[package]] -name = "cxx" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" -dependencies = [ - "cc", - 
"cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "scratch", - "syn 1.0.109", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" -dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", -] - [[package]] name = "darling" version = "0.13.4" @@ -934,8 +886,8 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -947,7 +899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -977,8 +929,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -989,8 +941,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustc_version", "syn 1.0.109", ] @@ -1006,9 +958,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1042,7 +994,7 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array", "group", @@ -1099,13 +1051,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1201,8 +1153,8 @@ dependencies = [ "num-bigint 0.4.3", "num-integer", "num-traits", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "serde", "syn 1.0.109", ] @@ -1260,9 +1212,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = 
"a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1341,9 +1293,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1356,9 +1308,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1366,15 +1318,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1384,9 +1336,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-locks" @@ -1400,26 +1352,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -1429,9 +1381,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ 
-1447,9 +1399,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1468,9 +1420,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1479,9 +1431,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -1492,7 +1444,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gpu-ffi" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bindgen", "crossbeam 0.8.2", @@ -1505,7 +1457,7 @@ dependencies = [ [[package]] name = "gpu-prover" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -1531,9 +1483,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.16" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes 1.4.0", "fnv", @@ -1543,16 +1495,16 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.26.0", - "tokio-util 0.7.7", + "tokio 1.28.2", + "tokio-util 0.7.8", "tracing", ] [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -1654,7 +1606,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -1710,9 +1662,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes 1.4.0", "futures-channel", @@ -1726,7 +1678,7 @@ dependencies = [ "itoa", "pin-project-lite 0.2.9", 
"socket2", - "tokio 1.26.0", + "tokio 1.28.2", "tower-service", "tracing", "want", @@ -1734,14 +1686,14 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", "rustls", - "tokio 1.26.0", + "tokio 1.28.2", "tokio-rustls", ] @@ -1753,7 +1705,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.2", "tokio-io-timeout", ] @@ -1766,32 +1718,31 @@ dependencies = [ "bytes 1.4.0", "hyper", "native-tls", - "tokio 1.26.0", + "tokio 1.28.2", "tokio-native-tls", ] [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -1813,9 +1764,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1854,16 +1805,16 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown", @@ -1880,30 +1831,31 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1923,9 +1875,9 @@ checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1959,9 +1911,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -1980,9 +1932,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.140" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libloading" @@ -1994,26 +1946,17 @@ dependencies = [ "winapi", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -2021,12 +1964,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "match_cfg" @@ -2072,9 +2012,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg 1.1.0", ] @@ -2087,7 +2027,7 @@ checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" dependencies = [ "ahash", "metrics-macros", - "portable-atomic", + "portable-atomic 0.3.20", ] [[package]] @@ -2096,16 +2036,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -2124,14 +2064,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2272,8 +2211,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2344,18 +2283,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -2365,9 +2304,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags 1.3.2", "cfg-if 1.0.0", @@ -2380,13 +2319,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -2397,11 +2336,10 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "pkg-config", @@ -2415,7 +2353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" dependencies = [ 
"async-trait", - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "futures-channel", "futures-executor", "futures-util", @@ -2456,7 +2394,7 @@ dependencies = [ "prost-build", "reqwest", "thiserror", - "tokio 1.26.0", + "tokio 1.28.2", "tonic", "tonic-build", ] @@ -2472,9 +2410,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0" +checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" dependencies = [ "log", "serde", @@ -2530,7 +2468,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -2545,8 +2483,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2562,15 +2500,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", "smallvec", - "windows-sys 0.45.0", + "windows-targets", ] [[package]] @@ -2619,15 +2557,15 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -2635,9 +2573,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -2645,22 +2583,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = 
"745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -2679,22 +2617,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -2727,15 +2665,24 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "portable-atomic" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" +dependencies = [ + "portable-atomic 1.3.3", +] + +[[package]] +name = "portable-atomic" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -2773,8 +2720,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -2785,8 +2732,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "version_check", ] @@ -2807,9 +2754,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -2852,8 +2799,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -2870,11 +2817,11 @@ dependencies = [ [[package]] name = "prover-service" version = "0.1.0" -source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.2#ac731f4baf40d39cb650ff5757b21f4a6228582e" +source = "git+https://github.com/matter-labs/heavy-ops-service.git?branch=v1.3.3#b46ecf3ea167456554e3fac69b33a1e56f6a47b2" dependencies = [ "api", "bincode", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "log", "num_cpus", 
"rand 0.4.6", @@ -2894,11 +2841,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", ] [[package]] @@ -3021,7 +2968,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -3111,9 +3058,9 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ - "crossbeam-channel 0.5.7", + "crossbeam-channel 0.5.8", "crossbeam-deque 0.8.3", - "crossbeam-utils 0.8.15", + "crossbeam-utils 0.8.16", "num_cpus", ] @@ -3128,22 +3075,22 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.2", ] [[package]] @@ -3152,22 +3099,28 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes 1.4.0", "encoding_rs", "futures-core", @@ -3191,7 +3144,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "tokio 1.26.0", + "tokio 1.28.2", "tokio-native-tls", "tokio-rustls", "tower-service", @@ -3209,7 +3162,7 @@ version = "0.4.1" source = "git+https://github.com/matter-labs/rescue-poseidon.git#f611a3353e48cf42153e44d89ed90da9bc5934e8" dependencies = [ "addchain", - "arrayvec 0.7.2", + "arrayvec 0.7.3", "blake2 0.10.6", "byteorder", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -3272,9 +3225,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = 
"0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -3305,28 +3258,28 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", "ring", + "rustls-webpki", "sct", - "webpki", ] [[package]] @@ -3335,7 +3288,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -3374,12 +3337,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.5.0" @@ -3450,9 +3407,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -3463,9 +3420,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -3479,9 +3436,9 @@ checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "sentry" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5ce6d3512e2617c209ec1e86b0ca2fea06454cd34653c91092bf0f3ec41f8e3" +checksum = "9e0bd2cbc3398be701a933e5b7357a4b6b1f94038d2054f118cba90b481a9fbe" dependencies = [ "httpdate", "native-tls", @@ -3491,15 +3448,16 @@ dependencies = [ "sentry-core", "sentry-debug-images", "sentry-panic", - "tokio 1.26.0", + "sentry-tracing", + "tokio 1.28.2", "ureq", ] [[package]] name = "sentry-backtrace" -version = "0.30.0" +version = "0.31.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7fe408d4d1f8de188a9309916e02e129cbe51ca19e55badea5a64899399b1a" +checksum = "9cf043f9bcb6c9ae084b7f10fb363a697c924badcbe7dac2dbeecea31271ed0c" dependencies = [ "backtrace", "once_cell", @@ -3509,9 +3467,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5695096a059a89973ec541062d331ff4c9aeef9c2951416c894f0fff76340e7d" +checksum = "16bde19e361cff463253371dbabee51dab416c6f9285d6e62106539f96d12079" dependencies = [ "hostname", "libc", @@ -3523,9 +3481,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b22828bfd118a7b660cf7a155002a494755c0424cebb7061e4743ecde9c7dbc" +checksum = "fe345c342f17e48b65451f424ce0848405b6b3a84fa0007ba444b84754bf760a" dependencies = [ "once_cell", "rand 0.8.5", @@ -3536,9 +3494,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9164d44a2929b1b7670afd7e87552514b70d3ae672ca52884639373d912a3d" +checksum = "be9460cda9409f799f839510ff3b2ab8db6e457f3085298e18eefc463948e157" dependencies = [ "findshlibs", "once_cell", @@ -3547,56 +3505,68 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ced2a7a8c14899d58eec402d946f69d5ed26a3fc363a7e8b1e5cb88473a01" +checksum = "063ac270f11157e435f8b133a007669a3e1a7920e23374485357a8692996188f" dependencies = [ "sentry-backtrace", "sentry-core", ] +[[package]] +name = "sentry-tracing" +version = "0.31.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc167b6746500ea4bb86c2c13afe7ca6f75f2ed1bcfd84243e870780b8ced529" +dependencies = [ + "sentry-backtrace", + "sentry-core", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "sentry-types" -version = "0.30.0" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360ee3270f7a4a1eee6c667f7d38360b995431598a73b740dfe420da548d9cc9" +checksum = "62d10a5962144f5fb65bb1290551623e6b976f442cb2fcb4e1dfe9fe6f8e8df4" dependencies = [ "debugid", - "getrandom 0.2.8", + "getrandom 0.2.10", "hex", "serde", "serde_json", "thiserror", - "time 0.3.20", + "time 0.3.22", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.156" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = 
"bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "itoa", "ryu", @@ -3632,8 +3602,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3660,7 +3630,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3684,7 +3654,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3705,7 +3675,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -3739,7 +3709,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -3827,8 +3797,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3848,8 +3818,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -3877,17 +3847,28 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" +dependencies = [ + "proc-macro2 1.0.60", + "quote 1.0.28", "unicode-ident", ] [[package]] name = "sync_vm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.2#681495e53b2f5c399943ee3c945f3143917e7930" +version = "1.3.3" +source = "git+https://github.com/matter-labs/sync_vm.git?branch=v1.3.3#95294cb3d497d4534e7fb85bf5a8faf5c2ed354b" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "cs_derive", "derivative", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", @@ -3916,15 +3897,16 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg 1.1.0", "cfg-if 1.0.0", "fastrand", "redox_syscall", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -3938,12 +3920,12 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.11" 
+version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" +checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", + "proc-macro2 1.0.60", + "quote 1.0.28", "syn 1.0.109", ] @@ -3958,22 +3940,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -3999,9 +3981,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -4011,15 +3993,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -4070,14 +4052,13 @@ dependencies = [ [[package]] name = "tokio" -version = "1.26.0" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg 1.1.0", "bytes 1.4.0", "libc", - "memchr", "mio", "num_cpus", "parking_lot", @@ -4085,7 +4066,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4095,18 +4076,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.2", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] @@ -4116,29 +4097,28 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", - "tokio 1.26.0", + "tokio 1.28.2", ] [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", - "tokio 1.26.0", - "webpki", + "tokio 1.28.2", ] [[package]] name = "tokio-stream" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.2", ] [[package]] @@ -4152,34 +4132,34 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.2", ] [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes 1.4.0", "futures-core", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.26.0", + "tokio 1.28.2", "tracing", ] [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" [[package]] name = "toml_edit" -version = "0.19.6" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" +checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739" dependencies = [ "indexmap", "toml_datetime", @@ -4207,7 +4187,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "tokio 1.26.0", + "tokio 1.28.2", "tokio-stream", "tokio-util 0.6.10", "tower", @@ -4223,9 +4203,9 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.60", "prost-build", - "quote 1.0.26", + "quote 1.0.28", "syn 1.0.109", ] @@ -4242,8 +4222,8 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.26.0", - "tokio-util 0.7.7", + "tokio 1.28.2", + "tokio-util 0.7.8", "tower-layer", "tower-service", "tracing", @@ -4276,20 +4256,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -4342,9 +4322,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -4355,7 +4335,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", - "time 0.3.20", + "time 0.3.22", "tracing", "tracing-core", "tracing-log", @@ -4403,15 +4383,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -4448,11 +4428,11 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "d4b45063f47caea744e48f5baa99169bd8bd9b882d80a99941141327bbb00f99" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "native-tls", "once_cell", @@ -4461,23 +4441,23 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", "serde", ] [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "serde", ] @@ -4522,11 +4502,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -4550,9 +4529,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4560,24 +4539,24 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -4587,38 +4566,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.26", + "quote 1.0.28", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.26", - "syn 1.0.109", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -4630,7 +4609,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ - "arrayvec 0.7.2", + "arrayvec 0.7.3", "base64 0.13.1", "bytes 1.4.0", "derive_more", @@ -4716,43 +4695,52 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 
0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-sys" -version = "0.45.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" -version = "0.42.2" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -4761,47 +4749,89 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] @@ -4823,27 +4853,40 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zk_evm" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.2#397683815115d21c6f9d314463b1ffaafdfc1951" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zk_evm.git?branch=v1.3.3#9a1eaa98acb9e3280dbbde5b132cbf64e15fe96e" dependencies = [ + "anyhow", "lazy_static", "num 0.4.0", "serde", "serde_json", "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs", +] + +[[package]] +name = "zk_evm_abstractions" +version = "0.1.0" +source = "git+https://github.com/matter-labs/zk_evm_abstractions.git#31361360123b4f2532ab345522c9b19510f04c31" +dependencies = [ + "anyhow", + "serde", + "static_assertions", "zkevm_opcode_defs", ] [[package]] name = "zkevm-assembly" version = "1.3.2" -source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#77a55f8427a2b44a19e213c06440da5248edbd2c" +source = "git+https://github.com/matter-labs/zkEVM-assembly.git?branch=v1.3.2#a5f2c38305fa672ec23cf3d4d804eb50e591288c" dependencies = [ "env_logger 0.9.3", "hex", @@ -4865,7 +4908,7 @@ name = "zkevm_opcode_defs" version = "1.3.2" source = "git+https://github.com/matter-labs/zkevm_opcode_defs.git?branch=v1.3.2#261b48e9369b356bbd65023d20227b45b47915a2" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "blake2 0.10.6", "ethereum-types", "k256", @@ -4876,8 +4919,8 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "1.3.2" -source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.2#1364026143d4060550130dc3f644ea74ee245441" +version = "1.3.3" +source = "git+https://github.com/matter-labs/zkevm_test_harness.git?branch=v1.3.3#363ead7afaac72bd3006c49d501934747781cbb4" dependencies = [ "bincode", "circuit_testing", @@ -4957,7 +5000,6 @@ name = "zksync_mini_merkle_tree" version = "1.0.0" dependencies = [ "once_cell", - "rayon", "zksync_basic_types", "zksync_crypto", ] @@ -4970,20 +5012,16 @@ dependencies = [ "blake2 0.10.6", "chrono", "codegen 0.1.0", - "ethbloom", - "hex", "metrics", "num 0.3.1", "once_cell", "parity-crypto", - "rayon", "rlp", "serde", "serde_json", "serde_with", "strum", "thiserror", - "tiny-keccak 1.5.0", "zk_evm", "zkevm-assembly", "zkevm_test_harness", @@ -5008,7 +5046,7 @@ dependencies = [ "reqwest", "serde", 
"thiserror", - "tokio 1.26.0", + "tokio 1.28.2", "vlog", "zk_evm", "zksync_basic_types", diff --git a/core/bin/setup_key_generator_and_server/Cargo.toml b/core/bin/setup_key_generator_and_server/Cargo.toml index 6c4340160f46..87ce636f8791 100644 --- a/core/bin/setup_key_generator_and_server/Cargo.toml +++ b/core/bin/setup_key_generator_and_server/Cargo.toml @@ -4,8 +4,8 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] @@ -23,9 +23,9 @@ vlog = { path = "../../lib/vlog", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } circuit_testing = {git = "https://github.com/matter-labs/era-circuit_testing.git", branch = "main"} -api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} -prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.2", features=["gpu"], default-features=false} -zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.2"} +api = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.3", features=["gpu"], default-features=false} +prover-service = { git = "https://github.com/matter-labs/era-heavy-ops-service.git", branch = "v1.3.3", features=["gpu"], default-features=false} +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3"} structopt = "0.3.26" diff --git a/core/bin/system-constants-generator/Cargo.toml b/core/bin/system-constants-generator/Cargo.toml index 3cc499e4f029..4d4fea76b185 100644 --- a/core/bin/system-constants-generator/Cargo.toml +++ b/core/bin/system-constants-generator/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] description = "Tool for generating JSON files with the system constants for L1/L2 contracts" @@ -12,7 +12,6 @@ publish = false # We don't want to publish our binaries. 
[dependencies] zksync_state = { path = "../../lib/state", version = "1.0" } -zksync_storage = { path = "../../lib/storage", version = "1.0" } zksync_types = { path = "../../lib/types", version = "1.0" } zksync_utils = { path = "../../lib/utils", version = "1.0" } zksync_contracts = {path = "../../lib/contracts", version = "1.0" } @@ -23,6 +22,3 @@ codegen = "0.2.0" serde = "1.0" serde_json = "1.0" once_cell = "1.7" -rand = { version = "0.7" } -num = { version = "0.3", features = ["serde", "rand"] } -tempfile = "3.0.2" diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index b7c5c5c91042..18b7ad8fd068 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -129,8 +129,8 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst scope.raw( vec![ - " ", - " ", + "//", + "//", ] .join("\n"), ); diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 9c27634c1ed5..1f1d5e4abeb9 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -1,11 +1,6 @@ use once_cell::sync::Lazy; -use tempfile::TempDir; use vm::{ - storage::Storage, - utils::{ - create_test_block_params, insert_system_contracts, read_bootloader_test_code, - BLOCK_GAS_LIMIT, - }, + utils::{create_test_block_params, read_bootloader_test_code, BLOCK_GAS_LIMIT}, vm_with_bootloader::{ init_vm_inner, push_raw_transaction_to_bootloader_memory, BlockContextMode, BootloaderJobType, DerivedBlockContext, TxExecutionMode, @@ -17,8 +12,7 @@ use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; -use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView}; -use zksync_storage::{db::Database, RocksDB}; +use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; use zksync_types::{ ethabi::Token, fee::Fee, @@ -148,20 +142,14 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let (block_context, block_properties) = create_test_block_params(); let block_context: DerivedBlockContext = block_context.into(); - let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); - let mut raw_storage = SecondaryStateStorage::new(db); - insert_system_contracts(&mut raw_storage); - let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); - - let bootloader_balane_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); - storage_ptr.set_value(&bootloader_balane_key, u256_to_h256(U256([0, 0, 1, 0]))); - - let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage_view = StorageView::new(raw_storage); + let bootloader_balance_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); + storage_view.set_value(bootloader_balance_key, u256_to_h256(U256([0, 0, 1, 0]))); + let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); let bytecode = read_bootloader_test_code("transfer_test"); let hash = hash_bytecode(&bytecode); - let bootloader = SystemContractCode { code: bytes_to_be_words(bytecode), hash, @@ -169,7 +157,6 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let bytecode = read_sys_contract_bytecode("", "DefaultAccount", 
ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); - let default_aa = SystemContractCode { code: bytes_to_be_words(bytecode), hash, @@ -242,16 +229,13 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let (block_context, block_properties) = create_test_block_params(); let block_context: DerivedBlockContext = block_context.into(); - let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let db = RocksDB::new(Database::StateKeeper, temp_dir.as_ref(), false); - let mut raw_storage = SecondaryStateStorage::new(db); - insert_system_contracts(&mut raw_storage); - let storage_ptr: &mut dyn Storage = &mut StorageView::new(&raw_storage); + let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage_view = StorageView::new(raw_storage); for tx in txs.iter() { let sender_address = tx.initiator_account(); let key = storage_key_for_eth_balance(&sender_address); - storage_ptr.set_value(&key, u256_to_h256(U256([0, 0, 1, 0]))); + storage_view.set_value(key, u256_to_h256(U256([0, 0, 1, 0]))); } // We also set some of the storage slots to non-zero values. This is not how it will be @@ -267,12 +251,12 @@ pub(super) fn execute_user_txs_in_test_gas_vm( SYSTEM_CONTEXT_GAS_PRICE_POSITION, ); - storage_ptr.set_value(&bootloader_balance_key, u256_to_h256(U256([1, 0, 0, 0]))); - storage_ptr.set_value(&tx_origin_key, u256_to_h256(U256([1, 0, 0, 0]))); - storage_ptr.set_value(&tx_gas_price_key, u256_to_h256(U256([1, 0, 0, 0]))); + storage_view.set_value(bootloader_balance_key, u256_to_h256(U256([1, 0, 0, 0]))); + storage_view.set_value(tx_origin_key, u256_to_h256(U256([1, 0, 0, 0]))); + storage_view.set_value(tx_gas_price_key, u256_to_h256(U256([1, 0, 0, 0]))); } - let mut oracle_tools = OracleTools::new(storage_ptr, HistoryEnabled); + let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled); let mut vm = init_vm_inner( &mut oracle_tools, diff --git a/core/bin/test_node/Cargo.toml b/core/bin/test_node/Cargo.toml new file mode 100644 index 000000000000..873e92c90cdc --- /dev/null +++ b/core/bin/test_node/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "zksync_test_node" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. 
+
+[dependencies]
+zksync_core = { path = "../zksync_core", version = "1.0" }
+zksync_basic_types = { path = "../../lib/basic_types", version = "1.0" }
+vlog = { path = "../../lib/vlog", version = "1.0" }
+
+zksync_contracts = { path = "../../lib/contracts" }
+zksync_types = { path = "../../lib/types", version = "1.0" }
+zksync_utils = { path = "../../lib/utils", version = "1.0" }
+zksync_state = {path = "../../lib/state", version = "1.0" }
+vm = {path = "../../lib/vm", version = "0.1.0"}
+
+anyhow = "1.0"
+tokio = { version = "1", features = ["time", "rt"] }
+futures = { version = "0.3", features = ["compat"] }
+once_cell = "1.7"
+
+
+jsonrpc-http-server = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" }
+jsonrpc-core = { git = "https://github.com/matter-labs/jsonrpc.git", branch = "master" }
+zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-features = false, features = [
+    "server", "client"
+] }
+clap = { version = "4.2.4", features = ["derive"] }
+reqwest = { version = "0.11", features = ["blocking"] }
+serde = { version = "1.0", features = ["derive"] }
+tracing = { version = "0.1.26", features = ["log"] }
+tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter", "time", "json"] }
+bigdecimal = { version = "0.2.0" }
diff --git a/core/bin/test_node/README.md b/core/bin/test_node/README.md
new file mode 100644
index 000000000000..0f3efaf67268
--- /dev/null
+++ b/core/bin/test_node/README.md
@@ -0,0 +1,116 @@
+# In-memory node with fork support
+
+This crate provides an in-memory node that supports forking the state from other networks.
+
+The goal of this crate is to offer a fast solution for integration testing, bootloader and system contract testing, and
+prototyping.
+
+Please note that this crate is still in the alpha stage, and not all functionality is fully supported. For final
+testing, it is highly recommended to use the 'local-node' or a testnet.
+
+Current limitations:
+
+- No communication between Layer 1 and Layer 2 (the local node operates only on Layer 2).
+- Many APIs are not yet implemented, but the basic set of APIs is supported.
+- No support for accessing historical data, such as the storage state at a specific block.
+- Only one transaction is allowed per Layer 1 batch.
+- Fixed values are returned for zk gas estimation.
+
+Current features:
+
+- Can fork the state of the mainnet, testnet, or a custom network at any given height.
+- Uses local bootloader and system contracts, making it suitable for testing new changes.
+- When running in non-fork mode, it operates deterministically (only one transaction per block, etc.), which simplifies
+  testing.
+- Starts up quickly and comes pre-configured with a few 'rich' accounts.
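+
+Once a node is started (see the 'How to' section below), it serves standard Ethereum-compatible JSON-RPC on port 8011,
+so any generic JSON-RPC client works as a quick smoke test. For example, with `curl` (a minimal sketch; it assumes the
+standard `eth_blockNumber` method, and any other implemented `eth_*` call can be substituted the same way):
+
+```shell
+curl -X POST -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' \
+  http://localhost:8011
+```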
+
+## How to
+
+To start a node:
+
+```shell
+cargo run --release -p zksync_test_node run
+```
+
+This will run a node (with an empty state) and make it available on port 8011.
+
+To fork mainnet:
+
+```shell
+cargo run --release -p zksync_test_node fork mainnet
+```
+
+This will run the node, forked at the current head of mainnet.
+
+You can also specify a custom http endpoint and a custom forking height:
+
+```shell
+cargo run --release -p zksync_test_node fork --fork-at 7000000 http://172.17.0.3:3060
+```
+
+## Forking network & sending calls
+
+You can use your favorite development tool (or tools like `curl`) or zksync-foundry:
+
+Check the testnet LINK balance:
+
+```shell
+$ cargo run --release -p zksync_test_node fork testnet
+
+$ zkcast call 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 "name()(string)" --rpc-url http://localhost:8011
+> ChainLink Token (goerli)
+
+$ zkcast call 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 "balanceOf(address)(uint256)" 0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78 --rpc-url http://localhost:8011
+> 28762283719732275444443116625665
+```
+
+Or mainnet USDT:
+
+```shell
+cargo run -p zksync_test_node fork mainnet
+
+zkcast call 0x493257fD37EDB34451f62EDf8D2a0C418852bA4C "name()(string)" --rpc-url http://localhost:8011
+> Tether USD
+```
+
+And you can also build & deploy your own contracts:
+
+```shell
+zkforge zkc src/Greeter.sol:Greeter --constructor-args "ZkSync and Foundry" --private-key 7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110 --rpc-url http://localhost:8011 --chain 270
+```
+
+## Testing bootloader & system contracts
+
+The in-memory node uses the currently compiled bootloader & system contracts, which makes it easy to test changes
+(and, together with forking, lets you see the effects of those changes on already deployed contracts).
+
+You can see the bootloader logs by setting the proper log level. In the example below, we recompile the bootloader and
+run it with a mainnet fork.
+
+```shell
+cd etc/system-contracts
+yarn preprocess && yarn hardhat run ./scripts/compile-yul.ts
+cd -
+RUST_LOG=vm=trace cargo run -p zksync_test_node fork --fork-at 70000000 testnet
+```
+
+## Replaying transactions from other networks locally
+
+Imagine that you have a testnet transaction that you'd like to replay locally (for example, to see more debug
+information):
+
+```shell
+cargo run --release -p zksync_test_node replay_tx testnet 0x7f039bcbb1490b855be37e74cf2400503ad57f51c84856362f99b0cbf1ef478a
+```
+
+### How does it work
+
+It utilizes an in-memory database to store the state information and employs simplified hashmaps to track blocks and
+transactions.
+
+In fork mode, it attempts to retrieve missing storage data from a remote source when it's not available locally.
diff --git a/core/bin/test_node/src/fork.rs b/core/bin/test_node/src/fork.rs
new file mode 100644
index 000000000000..fe12d7ae03b2
--- /dev/null
+++ b/core/bin/test_node/src/fork.rs
@@ -0,0 +1,301 @@
+//! This file holds tools used for test-forking other networks.
+//!
+//! There is ForkStorage (a wrapper over InMemoryStorage)
+//! and ForkDetails - which parses the network address and fork height from the arguments.
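+//!
+//! A rough sketch of the intended read-through behavior (hypothetical usage, assuming a
+//! `ForkDetails` built via `ForkDetails::from_network` and some `StorageKey` named `key`;
+//! this is illustration, not a public API contract):
+//!
+//! ```ignore
+//! use zksync_state::ReadStorage;
+//! let fork = ForkDetails::from_network("testnet", None).await;
+//! let storage = ForkStorage::new(Some(fork));
+//! // Reads consult the local InMemoryStorage first; a zero (missing) value is
+//! // fetched from the remote fork URL at the forked miniblock, then cached.
+//! let mut storage_ref = &storage;
+//! let value = storage_ref.read_value(&key);
+//! ```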
+
+use std::{
+    collections::HashMap,
+    convert::TryInto,
+    future::Future,
+    sync::{Arc, RwLock},
+};
+
+use tokio::runtime::Builder;
+use zksync_basic_types::{L1BatchNumber, L2ChainId, MiniblockNumber, H256, U64};
+
+use zksync_state::{InMemoryStorage, ReadStorage};
+use zksync_types::{
+    api::{BlockIdVariant, BlockNumber},
+    l2::L2Tx,
+    StorageKey,
+};
+use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
+
+use zksync_web3_decl::{jsonrpsee::http_client::HttpClient, namespaces::EthNamespaceClient};
+use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient};
+
+use crate::node::TEST_NODE_NETWORK_ID;
+
+fn block_on<F: Future + Send + 'static>(future: F) -> F::Output
+where
+    F::Output: Send,
+{
+    std::thread::spawn(move || {
+        let runtime = Builder::new_current_thread()
+            .enable_all()
+            .build()
+            .expect("tokio runtime creation failed");
+        runtime.block_on(future)
+    })
+    .join()
+    .unwrap()
+}
+
+/// In-memory storage that allows 'forking' from another network.
+/// If forking is enabled, it reads missing data from the remote location.
+#[derive(Debug)]
+pub struct ForkStorage {
+    pub inner: Arc<RwLock<ForkStorageInner>>,
+    pub chain_id: L2ChainId,
+}
+
+#[derive(Debug)]
+pub struct ForkStorageInner {
+    // Underlying local storage
+    pub raw_storage: InMemoryStorage,
+    // Cache of data that was read from the remote location.
+    pub value_read_cache: HashMap<StorageKey, H256>,
+    // Cache of factory deps that were read from the remote location.
+    pub factory_dep_cache: HashMap<H256, Option<Vec<u8>>>,
+    // If set - it holds the necessary information on where to fetch the data.
+    // If not set - it will simply read from the underlying storage.
+    pub fork: Option<ForkDetails>,
+}
+
+impl ForkStorage {
+    pub fn new(fork: Option<ForkDetails>) -> Self {
+        let chain_id = fork
+            .as_ref()
+            .and_then(|d| d.overwrite_chain_id)
+            .unwrap_or(L2ChainId(TEST_NODE_NETWORK_ID));
+        println!("Starting network with chain id: {:?}", chain_id);
+
+        ForkStorage {
+            inner: Arc::new(RwLock::new(ForkStorageInner {
+                raw_storage: InMemoryStorage::with_system_contracts_and_chain_id(
+                    chain_id,
+                    hash_bytecode,
+                ),
+                value_read_cache: Default::default(),
+                fork,
+                factory_dep_cache: Default::default(),
+            })),
+            chain_id,
+        }
+    }
+
+    fn read_value_internal(&self, key: &StorageKey) -> zksync_types::StorageValue {
+        let mut mutator = self.inner.write().unwrap();
+        let local_storage = mutator.raw_storage.read_value(key);
+
+        if let Some(fork) = &mutator.fork {
+            if !H256::is_zero(&local_storage) {
+                return local_storage;
+            }
+
+            if let Some(value) = mutator.value_read_cache.get(key) {
+                return *value;
+            }
+            let fork_ = (*fork).clone();
+            let key_ = *key;
+
+            let client = fork.create_client();
+
+            let result = block_on(async move {
+                client
+                    .get_storage_at(
+                        *key_.account().address(),
+                        h256_to_u256(*key_.key()),
+                        Some(BlockIdVariant::BlockNumber(BlockNumber::Number(U64::from(
+                            fork_.l2_miniblock,
+                        )))),
+                    )
+                    .await
+            })
+            .unwrap();
+
+            mutator.value_read_cache.insert(*key, result);
+            result
+        } else {
+            local_storage
+        }
+    }
+
+    pub fn load_factory_dep_internal(&self, hash: H256) -> Option<Vec<u8>> {
+        let mut mutator = self.inner.write().unwrap();
+        let local_storage = mutator.raw_storage.load_factory_dep(hash);
+        if let Some(fork) = &mutator.fork {
+            if local_storage.is_some() {
+                return local_storage;
+            }
+            if let Some(value) = mutator.factory_dep_cache.get(&hash) {
+                return value.clone();
+            }
+
+            let client = fork.create_client();
+            let result = block_on(async move { client.get_bytecode_by_hash(hash).await }).unwrap();
+            mutator.factory_dep_cache.insert(hash, result.clone());
+            result
+        } else {
+            local_storage
+        }
+    }
+}
+
+impl ReadStorage for ForkStorage {
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        (&*self).is_write_initial(key)
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        (&*self).load_factory_dep(hash)
+    }
+
+    fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue {
+        (&*self).read_value(key)
+    }
+}
+
+impl ReadStorage for &ForkStorage {
+    fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue {
+        self.read_value_internal(key)
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        let mut mutator = self.inner.write().unwrap();
+        mutator.raw_storage.is_write_initial(key)
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        self.load_factory_dep_internal(hash)
+    }
+}
+
+impl ForkStorage {
+    pub fn set_value(&mut self, key: StorageKey, value: zksync_types::StorageValue) {
+        let mut mutator = self.inner.write().unwrap();
+        mutator.raw_storage.set_value(key, value)
+    }
+    pub fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>) {
+        let mut mutator = self.inner.write().unwrap();
+        mutator.raw_storage.store_factory_dep(hash, bytecode)
+    }
+}
+
+/// Holds the information about the original chain.
+#[derive(Debug, Clone)]
+pub struct ForkDetails {
+    // URL of the server.
+    pub fork_url: String,
+    // Block number at which we forked (the next block to create is l1_block + 1).
+    pub l1_block: L1BatchNumber,
+    pub l2_miniblock: u64,
+    pub block_timestamp: u64,
+    pub overwrite_chain_id: Option<L2ChainId>,
+}
+
+impl ForkDetails {
+    pub async fn from_url_and_miniblock_and_chain(
+        url: &str,
+        client: HttpClient,
+        miniblock: u64,
+        chain_id: Option<L2ChainId>,
+    ) -> Self {
+        let block_details = client
+            .get_block_details(MiniblockNumber(miniblock as u32))
+            .await
+            .unwrap()
+            .unwrap_or_else(|| panic!("Could not find block {:?} in {:?}", miniblock, url));
+
+        let l1_batch_number = block_details.l1_batch_number;
+
+        println!(
+            "Creating fork from {:?} L1 block: {:?} L2 block: {:?} with timestamp {:?}",
+            url, l1_batch_number, miniblock, block_details.timestamp
+        );
+
+        ForkDetails {
+            fork_url: url.to_owned(),
+            l1_block: l1_batch_number,
+            block_timestamp: block_details.timestamp,
+            l2_miniblock: miniblock,
+            overwrite_chain_id: chain_id,
+        }
+    }
+
+    /// Create a fork from a given network at a given height.
+    pub async fn from_network(fork: &str, fork_at: Option<u64>) -> Self {
+        let (url, client) = Self::fork_to_url_and_client(fork);
+        let l2_miniblock = if let Some(fork_at) = fork_at {
+            fork_at
+        } else {
+            client.get_block_number().await.unwrap().as_u64()
+        };
+        Self::from_url_and_miniblock_and_chain(url, client, l2_miniblock, None).await
+    }
+
+    /// Create a fork from a given network, at a height BEFORE a transaction.
+    /// This will allow us to apply this transaction locally on top of this fork.
+    pub async fn from_network_tx(fork: &str, tx: H256) -> Self {
+        let (url, client) = Self::fork_to_url_and_client(fork);
+        let tx_details = client.get_transaction_by_hash(tx).await.unwrap().unwrap();
+        let overwrite_chain_id = Some(L2ChainId(tx_details.chain_id.as_u32() as u16));
+        let miniblock_number = MiniblockNumber(tx_details.block_number.unwrap().as_u32());
+        // We have to sync to the miniblock just before the one that contains the transaction.
+        let l2_miniblock = miniblock_number.saturating_sub(1) as u64;
+
+        Self::from_url_and_miniblock_and_chain(url, client, l2_miniblock, overwrite_chain_id).await
+    }
+
+    /// Return the URL and HTTP client for a given fork name.
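+    /// 'mainnet' and 'testnet' map to the public era endpoints below; anything else
+    /// is treated as a raw URL of an existing JSON-RPC endpoint.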
+    pub fn fork_to_url_and_client(fork: &str) -> (&str, HttpClient) {
+        let url = match fork {
+            "mainnet" => "https://mainnet.era.zksync.io:443",
+            "testnet" => "https://testnet.era.zksync.dev:443",
+            _ => fork,
+        };
+
+        let client = HttpClientBuilder::default()
+            .build(url)
+            .expect("Unable to create a client for fork");
+
+        (url, client)
+    }
+
+    /// Returns transactions that are in the same L2 miniblock as replay_tx, but were executed before it.
+    pub async fn get_earlier_transactions_in_same_block(&self, replay_tx: H256) -> Vec<L2Tx> {
+        let client = self.create_client();
+
+        let tx_details = client
+            .get_transaction_by_hash(replay_tx)
+            .await
+            .unwrap()
+            .unwrap();
+        let miniblock = MiniblockNumber(tx_details.block_number.unwrap().as_u32());
+
+        // And we're fetching all the transactions from this miniblock.
+        let block_transactions: Vec<zksync_types::Transaction> =
+            client.get_raw_block_transactions(miniblock).await.unwrap();
+        let mut tx_to_apply = Vec::new();
+
+        for tx in block_transactions {
+            let h = tx.hash();
+            let l2_tx: L2Tx = tx.try_into().unwrap();
+            tx_to_apply.push(l2_tx);
+
+            if h == replay_tx {
+                return tx_to_apply;
+            }
+        }
+        panic!(
+            "Could not find tx {:?} in miniblock: {:?}",
+            replay_tx, miniblock
+        );
+    }
+
+    pub fn create_client(&self) -> HttpClient {
+        HttpClientBuilder::default()
+            .build(self.fork_url.clone())
+            .expect("Unable to create a client for fork")
+    }
+}
diff --git a/core/bin/test_node/src/main.rs b/core/bin/test_node/src/main.rs
new file mode 100644
index 000000000000..ba5d5be830dd
--- /dev/null
+++ b/core/bin/test_node/src/main.rs
@@ -0,0 +1,184 @@
+use clap::{Parser, Subcommand};
+use fork::ForkDetails;
+use zks::ZkMockNamespaceImpl;
+
+mod fork;
+mod node;
+mod utils;
+mod zks;
+
+use node::InMemoryNode;
+
+use std::{
+    net::{IpAddr, Ipv4Addr, SocketAddr},
+    str::FromStr,
+};
+
+use tracing::Level;
+use tracing_subscriber::{EnvFilter, FmtSubscriber};
+
+use futures::{
+    channel::oneshot,
+    future::{self},
+    FutureExt,
+};
+use jsonrpc_core::IoHandler;
+use zksync_basic_types::{H160, H256};
+
+use zksync_core::api_server::web3::backend_jsonrpc::namespaces::{
+    eth::EthNamespaceT, zks::ZksNamespaceT,
+};
+
+/// List of wallets (address, private key) that we seed with tokens at start.
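+/// These keypairs are publicly known development accounts - never fund or use them on a real network.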
+pub const RICH_WALLETS: [(&str, &str); 4] = [
+    (
+        "0x36615Cf349d7F6344891B1e7CA7C72883F5dc049",
+        "0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110",
+    ),
+    (
+        "0xa61464658AfeAf65CccaaFD3a512b69A83B77618",
+        "0xac1e735be8536c6534bb4f17f06f6afc73b2b5ba84ac2cfb12f7461b20c0bbe3",
+    ),
+    (
+        "0x0D43eB5B8a47bA8900d84AA36656c92024e9772e",
+        "0xd293c684d884d56f8d6abd64fc76757d3664904e309a0645baf8522ab6366d9e",
+    ),
+    (
+        "0xA13c10C0D5bd6f79041B9835c63f91de35A15883",
+        "0x850683b40d4a740aa6e745f889a6fdc8327be76e122f5aba645a5b02d0248db8",
+    ),
+];
+
+async fn build_json_http(addr: SocketAddr, node: InMemoryNode) -> tokio::task::JoinHandle<()> {
+    let (sender, recv) = oneshot::channel::<()>();
+
+    let io_handler = {
+        let mut io = IoHandler::new();
+        io.extend_with(node.to_delegate());
+        io.extend_with(ZkMockNamespaceImpl.to_delegate());
+
+        io
+    };
+
+    std::thread::spawn(move || {
+        let runtime = tokio::runtime::Builder::new_multi_thread()
+            .enable_all()
+            .worker_threads(1)
+            .build()
+            .unwrap();
+
+        let server = jsonrpc_http_server::ServerBuilder::new(io_handler)
+            .threads(1)
+            .event_loop_executor(runtime.handle().clone())
+            .start_http(&addr)
+            .unwrap();
+
+        server.wait();
+        let _ = sender;
+    });
+
+    tokio::spawn(recv.map(drop))
+}
+
+#[derive(Debug, Parser)]
+#[command(author = "Matter Labs", version, about = "Test Node", long_about = None)]
+struct Cli {
+    #[command(subcommand)]
+    command: Command,
+    #[arg(long, default_value = "8011")]
+    /// Port to listen on - default: 8011
+    port: u16,
+}
+
+#[derive(Debug, Subcommand)]
+enum Command {
+    #[command(name = "run")]
+    Run,
+    #[command(name = "fork")]
+    Fork(ForkArgs),
+    #[command(name = "replay_tx")]
+    ReplayTx(ReplayArgs),
+}
+
+#[derive(Debug, Parser)]
+struct ForkArgs {
+    /// The remote network to fork from. Possible values:
+    /// - mainnet
+    /// - testnet
+    /// - http://XXX:YY
+    network: String,
+    #[arg(long)]
+    // Fork at a given L2 miniblock height.
+    // If not set - will use the current finalized block from the network.
+    fork_at: Option<u64>,
+}
+#[derive(Debug, Parser)]
+struct ReplayArgs {
+    /// The remote network to replay the transaction from. Possible values:
+    /// - mainnet
+    /// - testnet
+    /// - http://XXX:YY
+    network: String,
+    /// Transaction hash to replay.
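+    /// The node forks at the miniblock just before this transaction, then re-executes
+    /// everything in its miniblock up to and including it.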
+    tx: H256,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let opt = Cli::parse();
+    let filter = EnvFilter::from_default_env();
+
+    let subscriber = FmtSubscriber::builder()
+        .with_max_level(Level::TRACE)
+        .with_env_filter(filter)
+        .finish();
+
+    // Initialize the subscriber
+    tracing::subscriber::set_global_default(subscriber).expect("failed to set tracing subscriber");
+
+    let fork_details = match &opt.command {
+        Command::Run => None,
+        Command::Fork(fork) => Some(ForkDetails::from_network(&fork.network, fork.fork_at).await),
+        Command::ReplayTx(replay_tx) => {
+            Some(ForkDetails::from_network_tx(&replay_tx.network, replay_tx.tx).await)
+        }
+    };
+
+    // If we're replaying a transaction, we need to sync to the previous block
+    // and then replay all the transactions that happened in its block before it
+    // (including the transaction itself).
+    let transactions_to_replay = if let Command::ReplayTx(replay_tx) = &opt.command {
+        fork_details
+            .as_ref()
+            .unwrap()
+            .get_earlier_transactions_in_same_block(replay_tx.tx)
+            .await
+    } else {
+        vec![]
+    };
+
+    let node = InMemoryNode::new(fork_details);
+
+    if !transactions_to_replay.is_empty() {
+        node.apply_txs(transactions_to_replay);
+    }
+
+    println!("Setting Rich accounts:");
+    for (address, private_key) in RICH_WALLETS.iter() {
+        node.set_rich_account(H160::from_str(address).unwrap());
+        println!("Address: {:?} Key: {:?}", address, private_key)
+    }
+
+    let threads = build_json_http(
+        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), opt.port),
+        node,
+    )
+    .await;
+
+    future::select_all(vec![threads]).await.0.unwrap();
+
+    Ok(())
+}
diff --git a/core/bin/test_node/src/node.rs b/core/bin/test_node/src/node.rs
new file mode 100644
index 000000000000..0a2777e70f09
--- /dev/null
+++ b/core/bin/test_node/src/node.rs
@@ -0,0 +1,657 @@
+//! In-memory node that supports forking other networks.
+use crate::{
+    fork::{ForkDetails, ForkStorage},
+    utils::IntoBoxedFuture,
+};
+
+use std::{
+    collections::HashMap,
+    convert::TryInto,
+    sync::{Arc, RwLock},
+};
+
+use once_cell::sync::Lazy;
+
+use zksync_basic_types::{AccountTreeId, Bytes, H160, H256, U256, U64};
+use zksync_contracts::BaseSystemContracts;
+use zksync_core::api_server::web3::backend_jsonrpc::namespaces::eth::EthNamespaceT;
+use zksync_state::{ReadStorage, StorageView, WriteStorage};
+use zksync_types::{
+    api::{TransactionReceipt, TransactionVariant},
+    get_code_key, get_nonce_key,
+    l2::L2Tx,
+    transaction_request::{l2_tx_from_call_req, TransactionRequest},
+    tx::tx_execution_info::TxExecutionStatus,
+    utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance},
+    StorageKey, StorageLogQueryType, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS,
+    L2_ETH_TOKEN_ADDRESS,
+};
+use zksync_utils::{h256_to_account_address, h256_to_u256, h256_to_u64, u256_to_h256};
+
+use vm::{
+    utils::{create_test_block_params, BASE_SYSTEM_CONTRACTS, BLOCK_GAS_LIMIT, ETH_CALL_GAS_LIMIT},
+    vm::VmTxExecutionResult,
+    vm_with_bootloader::{
+        init_vm_inner, push_transaction_to_bootloader_memory, BlockContextMode, BootloaderJobType,
+        TxExecutionMode,
+    },
+    HistoryEnabled, OracleTools,
+};
+use zksync_web3_decl::types::{Filter, FilterChanges};
+
+pub const MAX_TX_SIZE: usize = 1000000;
+// Timestamp of the first block (if not running in fork mode).
+pub const NON_FORK_FIRST_BLOCK_TIMESTAMP: u64 = 1000;
+/// Network ID we use for the test node.
+pub const TEST_NODE_NETWORK_ID: u16 = 270;
+
+/// Basic information about the generated block (which is both an L1 batch and a miniblock).
+/// Currently, this test node supports exactly one transaction per block.
+pub struct BlockInfo {
+    pub batch_number: u32,
+    pub block_timestamp: u64,
+    /// Transaction included in this block.
+    pub tx_hash: H256,
+}
+
+/// Information about the executed transaction.
+pub struct TxExecutionInfo {
+    pub tx: L2Tx,
+    // Batch number where the transaction was executed.
+    pub batch_number: u32,
+    pub miniblock_number: u64,
+    pub result: VmTxExecutionResult,
+}
+
+/// Helper struct for InMemoryNode.
+pub struct InMemoryNodeInner {
+    /// Timestamp, batch number and miniblock number that will be used by the next block.
+    pub current_timestamp: u64,
+    pub current_batch: u32,
+    pub current_miniblock: u64,
+    // Map from transaction hash to details about the execution.
+    pub tx_results: HashMap<H256, TxExecutionInfo>,
+    // Map from batch number to information about the block.
+    pub blocks: HashMap<u32, BlockInfo>,
+    // Underlying storage
+    pub fork_storage: ForkStorage,
+}
+
+fn not_implemented<T: Send + 'static>() -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<T>> {
+    Err(jsonrpc_core::Error::method_not_found()).into_boxed_future()
+}
+
+/// In-memory node that can be used for local & unit testing.
+/// It also supports the option of forking testnet/mainnet.
+/// All contents are removed when the object is destroyed.
+pub struct InMemoryNode {
+    inner: Arc<RwLock<InMemoryNodeInner>>,
+}
+
+pub static PLAYGROUND_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
+    Lazy::new(BaseSystemContracts::playground);
+
+fn contract_address_from_tx_result(execution_result: &VmTxExecutionResult) -> Option<H160> {
+    for query in execution_result.result.logs.storage_logs.iter().rev() {
+        if query.log_type == StorageLogQueryType::InitialWrite
+            && query.log_query.address == ACCOUNT_CODE_STORAGE_ADDRESS
+        {
+            return Some(h256_to_account_address(&u256_to_h256(query.log_query.key)));
+        }
+    }
+    None
+}
+
+impl InMemoryNode {
+    pub fn new(fork: Option<ForkDetails>) -> Self {
+        InMemoryNode {
+            inner: Arc::new(RwLock::new(InMemoryNodeInner {
+                current_timestamp: fork
+                    .as_ref()
+                    .map(|f| f.block_timestamp + 1)
+                    .unwrap_or(NON_FORK_FIRST_BLOCK_TIMESTAMP),
+                current_batch: fork.as_ref().map(|f| f.l1_block.0 + 1).unwrap_or(1),
+                current_miniblock: fork.as_ref().map(|f| f.l2_miniblock + 1).unwrap_or(1),
+                tx_results: Default::default(),
+                blocks: Default::default(),
+                fork_storage: ForkStorage::new(fork),
+            })),
+        }
+    }
+
+    /// Applies multiple transactions - but still one per L1 batch.
+    pub fn apply_txs(&self, txs: Vec<L2Tx>) {
+        println!("Running {:?} transactions (one per batch)", txs.len());
+
+        for tx in txs {
+            println!("Executing {:?}", tx.hash());
+            self.run_l2_tx(tx, TxExecutionMode::VerifyExecute);
+        }
+    }
+
+    /// Adds a lot of tokens to a given account.
+    pub fn set_rich_account(&self, address: H160) {
+        let key = storage_key_for_eth_balance(&address);
+        let mut inner = self.inner.write().unwrap();
+        let keys = {
+            let mut storage_view = StorageView::new(&inner.fork_storage);
+            storage_view.set_value(key, u256_to_h256(U256::from(10u64.pow(19))));
+            storage_view.modified_storage_keys().clone()
+        };
+
+        for (key, value) in keys.iter() {
+            inner.fork_storage.set_value(*key, *value);
+        }
+    }
+
+    /// Runs the L2 'eth call' method - it does not commit changes to a block.
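+    /// Uses the 'playground' bootloader in EthCall mode, so the call observes the
+    /// current state but all of its writes are discarded with the temporary StorageView.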
+    fn run_l2_call(&self, l2_tx: L2Tx) -> Vec<u8> {
+        let execution_mode = TxExecutionMode::EthCall {
+            missed_storage_invocation_limit: 1000000,
+        };
+        let (mut block_context, block_properties) = create_test_block_params();
+
+        let inner = self.inner.write().unwrap();
+
+        let mut storage_view = StorageView::new(&inner.fork_storage);
+
+        let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled);
+
+        let bootloader_code = &PLAYGROUND_SYSTEM_CONTRACTS;
+        block_context.block_number = inner.current_batch;
+        block_context.block_timestamp = inner.current_timestamp;
+
+        // init vm
+        let mut vm = init_vm_inner(
+            &mut oracle_tools,
+            BlockContextMode::NewBlock(block_context.into(), Default::default()),
+            &block_properties,
+            BLOCK_GAS_LIMIT,
+            bootloader_code,
+            execution_mode,
+        );
+
+        let tx: Transaction = l2_tx.into();
+
+        push_transaction_to_bootloader_memory(&mut vm, &tx, execution_mode, None);
+
+        let vm_block_result =
+            vm.execute_till_block_end_with_call_tracer(BootloaderJobType::TransactionExecution);
+
+        match vm_block_result.full_result.revert_reason {
+            Some(result) => result.original_data,
+            None => vm_block_result
+                .full_result
+                .return_data
+                .into_iter()
+                .flat_map(|val| {
+                    let bytes: [u8; 32] = val.into();
+                    bytes.to_vec()
+                })
+                .collect::<Vec<_>>(),
+        }
+    }
+
+    fn run_l2_tx_inner(
+        &self,
+        l2_tx: L2Tx,
+        execution_mode: TxExecutionMode,
+    ) -> (
+        HashMap<StorageKey, H256>,
+        VmTxExecutionResult,
+        BlockInfo,
+        HashMap<U256, Vec<U256>>,
+    ) {
+        let (mut block_context, block_properties) = create_test_block_params();
+
+        let inner = self.inner.write().unwrap();
+
+        let mut storage_view = StorageView::new(&inner.fork_storage);
+
+        let mut oracle_tools = OracleTools::new(&mut storage_view, HistoryEnabled);
+
+        let bootloader_code = if execution_mode == TxExecutionMode::VerifyExecute {
+            &BASE_SYSTEM_CONTRACTS
+        } else {
+            &PLAYGROUND_SYSTEM_CONTRACTS
+        };
+
+        block_context.block_number = inner.current_batch;
+        block_context.block_timestamp = inner.current_timestamp;
+        let block = BlockInfo {
+            batch_number: block_context.block_number,
+            block_timestamp: block_context.block_timestamp,
+            tx_hash: l2_tx.hash(),
+        };
+
+        // init vm
+        let mut vm = init_vm_inner(
+            &mut oracle_tools,
+            BlockContextMode::NewBlock(block_context.into(), Default::default()),
+            &block_properties,
+            BLOCK_GAS_LIMIT,
+            bootloader_code,
+            execution_mode,
+        );
+
+        let tx: Transaction = l2_tx.into();
+
+        push_transaction_to_bootloader_memory(&mut vm, &tx, execution_mode, None);
+
+        let tx_result = vm.execute_next_tx(u32::MAX, true).unwrap();
+
+        println!(
+            "Tx Execution results: {:?} {:?}",
+            tx_result.status, tx_result.result.revert_reason
+        );
+
+        vm.execute_till_block_end(BootloaderJobType::BlockPostprocessing);
+
+        let bytecodes = vm
+            .state
+            .decommittment_processor
+            .known_bytecodes
+            .inner()
+            .clone();
+
+        let modified_keys = storage_view.modified_storage_keys().clone();
+        (modified_keys, tx_result, block, bytecodes)
+    }
+
+    /// Runs an L2 transaction and commits it to a new block.
+    fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) {
+        let tx_hash = l2_tx.hash();
+
+        let (keys, result, block, bytecodes) = self.run_l2_tx_inner(l2_tx.clone(), execution_mode);
+
+        // Write all the mutated keys (storage slots).
+        let mut inner = self.inner.write().unwrap();
+        for (key, value) in keys.iter() {
+            inner.fork_storage.set_value(*key, *value);
+        }
+
+        // Write all the factory deps.
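+        // The VM keeps bytecodes as vectors of U256 words; flatten each word back into
+        // 32 big-endian bytes before handing the bytecode to the fork storage.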
+        for (hash, code) in bytecodes.iter() {
+            inner.fork_storage.store_factory_dep(
+                u256_to_h256(*hash),
+                code.iter()
+                    .flat_map(|entry| {
+                        let mut bytes = vec![0u8; 32];
+                        entry.to_big_endian(&mut bytes);
+                        bytes.to_vec()
+                    })
+                    .collect(),
+            )
+        }
+        let current_miniblock = inner.current_miniblock;
+        inner.tx_results.insert(
+            tx_hash,
+            TxExecutionInfo {
+                tx: l2_tx,
+                batch_number: block.batch_number,
+                miniblock_number: current_miniblock,
+                result,
+            },
+        );
+        inner.blocks.insert(block.batch_number, block);
+        {
+            inner.current_timestamp += 1;
+            inner.current_batch += 1;
+            inner.current_miniblock += 1;
+        }
+    }
+}
+
+impl EthNamespaceT for InMemoryNode {
+    fn chain_id(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U64>> {
+        let inner = self.inner.read().unwrap();
+        Ok(U64::from(inner.fork_storage.chain_id.0 as u64)).into_boxed_future()
+    }
+
+    fn call(
+        &self,
+        req: zksync_types::transaction_request::CallRequest,
+        _block: Option<zksync_types::api::BlockIdVariant>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Bytes>> {
+        let mut tx = l2_tx_from_call_req(req, MAX_TX_SIZE).unwrap();
+        tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into();
+        let result = self.run_l2_call(tx);
+
+        Ok(result.into()).into_boxed_future()
+    }
+
+    fn get_balance(
+        &self,
+        address: zksync_basic_types::Address,
+        _block: Option<zksync_types::api::BlockIdVariant>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        let balance_key = storage_key_for_standard_token_balance(
+            AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
+            &address,
+        );
+
+        let balance = self
+            .inner
+            .write()
+            .unwrap()
+            .fork_storage
+            .read_value(&balance_key);
+
+        Ok(h256_to_u256(balance)).into_boxed_future()
+    }
+
+    fn get_block_by_number(
+        &self,
+        block_number: zksync_types::api::BlockNumber,
+        _full_transactions: bool,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<
+            Option<zksync_types::api::Block<zksync_types::api::TransactionVariant>>,
+        >,
+    > {
+        // Currently we support only the 'most recent' block.
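+        // Committed / Finalized / Latest all resolve to the current head below;
+        // anything naming a specific historic block is rejected as unimplemented.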
+        let reader = self.inner.read().unwrap();
+        match block_number {
+            zksync_types::api::BlockNumber::Committed
+            | zksync_types::api::BlockNumber::Finalized
+            | zksync_types::api::BlockNumber::Latest => {}
+            zksync_types::api::BlockNumber::Earliest
+            | zksync_types::api::BlockNumber::Pending
+            | zksync_types::api::BlockNumber::Number(_) => return not_implemented(),
+        }
+
+        let txn: Vec<TransactionVariant> = vec![];
+
+        let block = zksync_types::api::Block {
+            transactions: txn,
+            hash: Default::default(),
+            parent_hash: Default::default(),
+            uncles_hash: Default::default(),
+            author: Default::default(),
+            state_root: Default::default(),
+            transactions_root: Default::default(),
+            receipts_root: Default::default(),
+            number: U64::from(reader.current_miniblock),
+            l1_batch_number: Some(U64::from(reader.current_batch)),
+            gas_used: Default::default(),
+            gas_limit: Default::default(),
+            base_fee_per_gas: Default::default(),
+            extra_data: Default::default(),
+            logs_bloom: Default::default(),
+            timestamp: Default::default(),
+            l1_batch_timestamp: Default::default(),
+            difficulty: Default::default(),
+            total_difficulty: Default::default(),
+            seal_fields: Default::default(),
+            uncles: Default::default(),
+            size: Default::default(),
+            mix_hash: Default::default(),
+            nonce: Default::default(),
+        };
+
+        Ok(Some(block)).into_boxed_future()
+    }
+
+    fn get_code(
+        &self,
+        address: zksync_basic_types::Address,
+        _block: Option<zksync_types::api::BlockIdVariant>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Bytes>> {
+        let code_key = get_code_key(&address);
+
+        let code_hash = self
+            .inner
+            .write()
+            .unwrap()
+            .fork_storage
+            .read_value(&code_key);
+
+        Ok(Bytes::from(code_hash.as_bytes())).into_boxed_future()
+    }
+
+    fn get_transaction_count(
+        &self,
+        address: zksync_basic_types::Address,
+        _block: Option<zksync_types::api::BlockIdVariant>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        let nonce_key = get_nonce_key(&address);
+
+        let result = self
+            .inner
+            .write()
+            .unwrap()
+            .fork_storage
+            .read_value(&nonce_key);
+        Ok(h256_to_u64(result).into()).into_boxed_future()
+    }
+
+    fn get_transaction_receipt(
+        &self,
+        hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<TransactionReceipt>>> {
+        let reader = self.inner.read().unwrap();
+        let tx_result = reader.tx_results.get(&hash);
+
+        let receipt = tx_result.map(|info| {
+            let status = if info.result.status == TxExecutionStatus::Success {
+                U64::from(1)
+            } else {
+                U64::from(0)
+            };
+
+            TransactionReceipt {
+                transaction_hash: hash,
+                transaction_index: U64::from(1),
+                block_hash: None,
+                block_number: Some(U64::from(info.miniblock_number)),
+                l1_batch_tx_index: None,
+                l1_batch_number: Some(U64::from(info.batch_number as u64)),
+                from: Default::default(),
+                to: Some(info.tx.execute.contract_address),
+                cumulative_gas_used: Default::default(),
+                gas_used: Some(info.tx.common_data.fee.gas_limit - info.result.gas_refunded),
+                contract_address: contract_address_from_tx_result(&info.result),
+                logs: vec![],
+                l2_to_l1_logs: vec![],
+                status: Some(status),
+                root: None,
+                logs_bloom: Default::default(),
+                transaction_type: None,
+                effective_gas_price: Some(500.into()),
+            }
+        });
+
+        Ok(receipt).into_boxed_future()
+    }
+
+    fn send_raw_transaction(
+        &self,
+        tx_bytes: zksync_basic_types::Bytes,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> {
+        let chain_id = TEST_NODE_NETWORK_ID;
+        let (tx_req, hash) =
+            TransactionRequest::from_bytes(&tx_bytes.0, chain_id, MAX_TX_SIZE).unwrap();
+
+        let mut l2_tx: L2Tx = tx_req.try_into().unwrap();
+        l2_tx.set_input(tx_bytes.0, hash);
+        assert_eq!(hash, l2_tx.hash());
+
+        self.run_l2_tx(l2_tx, TxExecutionMode::VerifyExecute);
+
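+        // The transaction has already been executed synchronously above, so returning
+        // the hash means its receipt is immediately available via get_transaction_receipt.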
+        Ok(hash).into_boxed_future()
+    }
+
+    // Methods below are not currently implemented.
+
+    fn get_block_number(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U64>> {
+        not_implemented()
+    }
+
+    fn estimate_gas(
+        &self,
+        _req: zksync_types::transaction_request::CallRequest,
+        _block: Option<zksync_types::api::BlockNumber>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn gas_price(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn new_filter(&self, _filter: Filter) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn new_block_filter(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn uninstall_filter(&self, _idx: U256) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> {
+        not_implemented()
+    }
+
+    fn new_pending_transaction_filter(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn get_logs(
+        &self,
+        _filter: Filter,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_types::api::Log>>> {
+        not_implemented()
+    }
+
+    fn get_filter_logs(
+        &self,
+        _filter_index: U256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> {
+        not_implemented()
+    }
+
+    fn get_filter_changes(
+        &self,
+        _filter_index: U256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> {
+        not_implemented()
+    }
+
+    fn get_block_by_hash(
+        &self,
+        _hash: zksync_basic_types::H256,
+        _full_transactions: bool,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<
+            Option<zksync_types::api::Block<zksync_types::api::TransactionVariant>>,
+        >,
+    > {
+        not_implemented()
+    }
+
+    fn get_block_transaction_count_by_number(
+        &self,
+        _block_number: zksync_types::api::BlockNumber,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
+        not_implemented()
+    }
+
+    fn get_block_transaction_count_by_hash(
+        &self,
+        _block_hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
+        not_implemented()
+    }
+
+    fn get_storage(
+        &self,
+        _address: zksync_basic_types::Address,
+        _idx: U256,
+        _block: Option<zksync_types::api::BlockIdVariant>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> {
+        not_implemented()
+    }
+
+    fn get_transaction_by_hash(
+        &self,
+        _hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
+        not_implemented()
+    }
+
+    fn get_transaction_by_block_hash_and_index(
+        &self,
+        _block_hash: zksync_basic_types::H256,
+        _index: zksync_basic_types::web3::types::Index,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
+        not_implemented()
+    }
+
+    fn get_transaction_by_block_number_and_index(
+        &self,
+        _block_number: zksync_types::api::BlockNumber,
+        _index: zksync_basic_types::web3::types::Index,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
+        not_implemented()
+    }
+
+    fn protocol_version(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<String>> {
+        not_implemented()
+    }
+
+    fn syncing(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::web3::types::SyncState>>
+    {
+        not_implemented()
+    }
+
+    fn accounts(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_basic_types::Address>>> {
+        not_implemented()
+    }
+
+    fn coinbase(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Address>> {
+        not_implemented()
+    }
+
+    fn compilers(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<String>>> {
+        not_implemented()
+    }
+
+    fn hashrate(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented()
+    }
+
+    fn get_uncle_count_by_block_hash(
+        &self,
+        _hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
+        not_implemented()
+    }
+
+    fn get_uncle_count_by_block_number(
+        &self,
+        _number: zksync_types::api::BlockNumber,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
+        not_implemented()
+    }
+
+    fn mining(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> {
+        not_implemented()
+    }
+
+    fn send_transaction(
+        &self,
+        _transaction_request: zksync_types::web3::types::TransactionRequest,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<H256>> {
+        not_implemented()
+    }
+}
diff --git a/core/bin/test_node/src/utils.rs b/core/bin/test_node/src/utils.rs
new file mode 100644
index 000000000000..76f5017eeab7
--- /dev/null
+++ b/core/bin/test_node/src/utils.rs
@@ -0,0 +1,16 @@
+use std::pin::Pin;
+
+use futures::Future;
+
+pub(crate) trait IntoBoxedFuture: Sized + Send + 'static {
+    fn into_boxed_future(self) -> Pin<Box<dyn Future<Output = Self> + Send>> {
+        Box::pin(async { self })
+    }
+}
+
+impl<T, U> IntoBoxedFuture for Result<T, U>
+where
+    T: Send + 'static,
+    U: Send + 'static,
+{
+}
diff --git a/core/bin/test_node/src/zks.rs b/core/bin/test_node/src/zks.rs
new file mode 100644
index 000000000000..c1f446073a92
--- /dev/null
+++ b/core/bin/test_node/src/zks.rs
@@ -0,0 +1,173 @@
+use bigdecimal::BigDecimal;
+use zksync_basic_types::{MiniblockNumber, U256};
+use zksync_core::api_server::web3::backend_jsonrpc::namespaces::zks::ZksNamespaceT;
+use zksync_types::api::BridgeAddresses;
+
+/// Mock implementation of ZksNamespace - used only in the test node.
+pub struct ZkMockNamespaceImpl;
+
+macro_rules! not_implemented {
+    () => {
+        Box::pin(async move { Err(jsonrpc_core::Error::method_not_found()) })
+    };
+}
+impl ZksNamespaceT for ZkMockNamespaceImpl {
+    /// We have to support this method, as zksync foundry depends on it.
+    /// For now, returning a fake amount of gas.
+    fn estimate_fee(
+        &self,
+        _req: zksync_types::transaction_request::CallRequest,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_types::fee::Fee>> {
+        Box::pin(async move {
+            Ok(zksync_types::fee::Fee {
+                gas_limit: U256::from(1000000000),
+                max_fee_per_gas: U256::from(1000000000),
+                max_priority_fee_per_gas: U256::from(1000000000),
+                gas_per_pubdata_limit: U256::from(1000000000),
+            })
+        })
+    }
+
+    fn get_raw_block_transactions(
+        &self,
+        _block_number: MiniblockNumber,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_types::Transaction>>> {
+        not_implemented!()
+    }
+
+    fn estimate_gas_l1_to_l2(
+        &self,
+        _req: zksync_types::transaction_request::CallRequest,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
+        not_implemented!()
+    }
+
+    fn get_main_contract(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Address>> {
+        not_implemented!()
+    }
+
+    fn get_testnet_paymaster(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_basic_types::Address>>> {
+        not_implemented!()
+    }
+
+    fn get_bridge_contracts(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<BridgeAddresses>> {
+        not_implemented!()
+    }
+
+    fn l1_chain_id(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::U64>> {
+        not_implemented!()
+    }
+
+    fn get_confirmed_tokens(
+        &self,
+        _from: u32,
+        _limit: u8,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_web3_decl::types::Token>>> {
+        not_implemented!()
+    }
+
+    fn get_token_price(
+        &self,
+        _token_address: zksync_basic_types::Address,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<BigDecimal>> {
+        not_implemented!()
+    }
+
+    fn get_all_account_balances(
+        &self,
+        _address: zksync_basic_types::Address,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<std::collections::HashMap<zksync_basic_types::Address, U256>>,
+    > {
+        not_implemented!()
+    }
+
+    fn get_l2_to_l1_msg_proof(
+        &self,
+        _block: zksync_basic_types::MiniblockNumber,
+        _sender: zksync_basic_types::Address,
+        _msg: zksync_basic_types::H256,
+        _l2_log_position: Option<usize>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::L2ToL1LogProof>>>
+    {
+        not_implemented!()
+    }
+
+    fn get_l2_to_l1_log_proof(
+        &self,
+        _tx_hash: zksync_basic_types::H256,
+        _index: Option<usize>,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::L2ToL1LogProof>>>
+    {
+        not_implemented!()
+    }
+
+    fn get_l1_batch_number(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::U64>> {
+        not_implemented!()
+    }
+
+    fn get_block_details(
+        &self,
+        _block_number: zksync_basic_types::MiniblockNumber,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<Option<zksync_types::api::BlockDetails>>,
+    > {
+        not_implemented!()
+    }
+
+    fn get_miniblock_range(
+        &self,
+        _batch: zksync_basic_types::L1BatchNumber,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<Option<(zksync_basic_types::U64, zksync_basic_types::U64)>>,
+    > {
+        not_implemented!()
+    }
+
+    fn set_known_bytecode(
+        &self,
+        _bytecode: zksync_basic_types::Bytes,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> {
+        not_implemented!()
+    }
+
+    fn get_transaction_details(
+        &self,
+        _hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::TransactionDetails>>>
+    {
+        not_implemented!()
+    }
+
+    fn get_l1_batch_details(
+        &self,
+        _batch: zksync_basic_types::L1BatchNumber,
+    ) -> jsonrpc_core::BoxFuture<
+        jsonrpc_core::Result<Option<zksync_types::api::L1BatchDetails>>,
+    > {
+        not_implemented!()
+    }
+
+    fn get_bytecode_by_hash(
+        &self,
+        _hash: zksync_basic_types::H256,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<Vec<u8>>>> {
+        not_implemented!()
+    }
+
+    fn get_l1_gas_price(
+        &self,
+    ) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::U64>> {
+        not_implemented!()
+    }
+}
diff --git a/core/bin/vk_setup_data_generator_server_fri/Cargo.toml b/core/bin/vk_setup_data_generator_server_fri/Cargo.toml
new file mode 100644
index 000000000000..5e0e92abccda
--- /dev/null
+++ b/core/bin/vk_setup_data_generator_server_fri/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+name = "vk_setup_data_generator_server_fri"
+version = "1.0.0"
+edition = "2021"
+
+[[bin]]
+name = "zksync_vk_generator_fri"
+path = "src/main.rs"
+
+[[bin]]
+name = "zksync_setup_data_generator_fri"
+path = "src/setup_data_generator.rs"
+
+[lib]
+name = "zksync_vk_setup_data_server_fri"
+path = "src/lib.rs"
+
+[dependencies]
+vlog = { path = "../../lib/vlog", version = "1.0" }
+zksync_types = { path = "../../lib/types", version = "1.0" }
+
+zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" }
+circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"] }
+zksync_config = { path = "../../lib/config", version = "1.0" }
+
+serde_json = "1.0"
+serde = { version = "1.0", features = ["derive"] }
+serde_derive = "1.0"
+itertools = "0.10.5"
+bincode = "1"
+structopt = "0.3.26"
diff --git a/core/bin/vk_setup_data_generator_server_fri/README.md b/core/bin/vk_setup_data_generator_server_fri/README.md
new file mode 100644
index 000000000000..dd095531ebe4
--- /dev/null
+++ b/core/bin/vk_setup_data_generator_server_fri/README.md
@@ -0,0 +1,5 @@
+# Setup data
+
+## Generating setup data for a specific circuit type
+
+`cargo +nightly-2023-05-31 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer`
diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json
new file mode 100644
index 000000000000..62d4b5159cfd
--- /dev/null
+++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json
@@ -0,0 +1,257 @@
+{
+  "StorageApplication": {
+    "fixed_parameters": {
+      "parameters": {
+        "num_columns_under_copy_permutation": 60,
+        "num_witness_columns": 0,
+        "num_constant_columns": 4,
+        "max_allowed_constraint_degree": 8
+      },
+      "lookup_parameters": {
+        "UseSpecializedColumnsWithTableIdAsConstant": {
+          "width": 3,
+          "num_repetitions": 26,
+          "share_table_id": true
+        }
+      },
+      "domain_size": 1048576,
+      "total_tables_len": 132352,
+      "public_inputs_locations": [
+        [
+          0,
+          1027358
+        ],
+        [
+          1,
+          1027358
+        ],
+        [
+          2,
+          1027358
+        ],
+        [
+          3,
+          1027358
+        ]
+      ],
+      "extra_constant_polys_for_selectors": 2,
+      "table_ids_column_idxes": [
+        6
+      ],
+      "quotient_degree": 8,
+      "selectors_placement": {
+        "Fork": {
+          "left": {
+            "Fork": {
+              "left": {
+                "Fork": {
+                  "left": {
+                    "Fork": {
+                      "left": {
+                        "GateOnly": {
+                          "gate_idx": 2,
+                          "num_constants": 2,
+                          "degree": 3,
+                          "needs_selector": true,
+                          "is_lookup": false
+                        }
+                      },
+                      "right": {
+                        "Fork": {
+                          "left": {
+                            "Fork": {
"left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 49678851679356961, + 7782973250089517567, + 5742332748381659122, + 7021959466433668811 + ], + [ + 7173510129360491239, + 14879623732006243967, + 17462481942835663634, + 10566534887019251489 + ], + [ + 6111404110591510291, + 16968701421170349713, + 3258322406402816010, + 4065733755715147020 + ], + [ + 11846482936856554144, + 2147966313304461627, + 728534638266355796, + 95380962898885359 + ], + [ + 16229166733732585403, + 681879112503008152, + 15066140174745708871, + 2481070927240530192 + ], + [ + 7420166149307347478, + 16882412417463011370, + 2676278458519593834, + 11896039619588737011 + ], + [ + 9967046125648558907, + 913185819453214883, + 13915100312430656654, + 3451781706223208121 + ], + [ + 11970577891442698507, + 6873264544724217019, + 12143450276430417018, + 10967230584499216609 + ], + [ + 9122945027646360633, + 6776986892003671741, + 16638557427754611081, + 9270157208179163878 + ], + [ + 864383518976280584, + 12660243649720946801, + 9037458929254917711, + 14557825576434269273 + ], + [ + 3250682114219598633, + 6757359290236006418, + 18440828022928773886, + 5144935506772537543 + ], + [ + 2931846103546480575, + 16357131371317801624, + 11786368493872353262, + 16190743419947458980 + ], + [ + 13988603584113133261, + 17909593593928730530, + 8426307689101617932, + 17024276855000805045 + ], + [ + 5063177493048024705, + 14492565639354808569, + 8414969388915699488, + 8783210495893660069 + ], + [ + 3181994260382982581, + 7353211204501032194, + 2789564250321287823, + 14671305422643991999 + ], + [ + 15178790861258436355, + 13029080993287351770, + 11765890450974641664, + 17689888130365913676 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json new file mode 100644 index 000000000000..7d1d36cc5d63 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json @@ -0,0 +1,257 @@ +{ + "EventsSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 18 + }, + "lookup_parameters": { + 
"UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 8, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 531517 + ], + [ + 1, + 531517 + ], + [ + 2, + 531517 + ], + [ + 3, + 531517 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 12441068674128723436, + 11790418381101103423, + 6129861349451795186, + 4710685951407331081 + ], + [ + 5096638018234605034, + 9297760046210706272, + 11038604827668536380, + 1473851171818850283 + ], + [ + 10176456143476370592, + 6347222948505884286, + 14243463427362932109, + 2973760622304770278 + ], + [ + 5822232151084468810, + 18342623242512353783, + 9991236151502413973, + 15507511860550425736 + ], + [ + 1818744887123817977, + 12543213684249900332, + 17104140515825257324, + 11236907728434185585 + ], + [ + 8045609137449889534, + 16202711686628756319, + 8684382310386899038, + 11976015809126046559 + ], + [ + 10733288510012516866, + 2076154788656020771, + 18044559881610029743, + 9096073016449196929 + ], + [ + 7905144671615160540, + 10783635808086751649, + 5523411776063518007, + 13403645893185907834 + ], + [ + 17764077820496881012, + 3635568206179979668, + 6426048422505024806, + 4265189848292747243 + ], + [ + 16526539683536908140, + 12588861570327833011, + 3214353082854373768, + 2777956265788849348 + ], + [ + 3217854840859818387, + 943498317256413059, + 1345084765986076822, + 15254121967033229193 + ], + [ + 12572651518442615530, + 3025471270830286975, + 1047774746509518234, + 1321385795793706983 + ], + [ + 16656078307211525871, + 5888489465261654229, + 3215923756560092884, + 1003958137542788275 + ], + [ + 5480821888469691123, + 10857988702136328279, + 1137095326815922962, + 12357465209764215246 + ], + [ + 2579074180679487806, + 18319237296113906693, + 1102892466010219312, + 16458320716783577649 + ], + [ + 92254087489383590, + 
7332092919119409047, + 15748242357100618434, + 2667394706391511758 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json new file mode 100644 index 000000000000..7ee86f8b4c2f --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json @@ -0,0 +1,257 @@ +{ + "L1MessagesSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 18 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 8, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 531517 + ], + [ + 1, + 531517 + ], + [ + 2, + 531517 + ], + [ + 3, + 531517 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 12441068674128723436, + 11790418381101103423, + 6129861349451795186, + 4710685951407331081 + ], + [ + 5096638018234605034, + 9297760046210706272, + 11038604827668536380, + 1473851171818850283 + ], + [ + 10176456143476370592, + 6347222948505884286, + 14243463427362932109, + 2973760622304770278 + ], + [ + 5822232151084468810, + 18342623242512353783, + 9991236151502413973, + 15507511860550425736 + ], + [ + 1818744887123817977, + 12543213684249900332, + 17104140515825257324, + 11236907728434185585 + ], + [ + 8045609137449889534, + 16202711686628756319, + 8684382310386899038, + 11976015809126046559 + ], + [ + 10733288510012516866, + 2076154788656020771, + 18044559881610029743, + 9096073016449196929 + ], + [ + 7905144671615160540, + 10783635808086751649, + 5523411776063518007, + 13403645893185907834 + ], + [ + 17764077820496881012, + 
3635568206179979668, + 6426048422505024806, + 4265189848292747243 + ], + [ + 16526539683536908140, + 12588861570327833011, + 3214353082854373768, + 2777956265788849348 + ], + [ + 3217854840859818387, + 943498317256413059, + 1345084765986076822, + 15254121967033229193 + ], + [ + 12572651518442615530, + 3025471270830286975, + 1047774746509518234, + 1321385795793706983 + ], + [ + 16656078307211525871, + 5888489465261654229, + 3215923756560092884, + 1003958137542788275 + ], + [ + 5480821888469691123, + 10857988702136328279, + 1137095326815922962, + 12357465209764215246 + ], + [ + 2579074180679487806, + 18319237296113906693, + 1102892466010219312, + 16458320716783577649 + ], + [ + 92254087489383590, + 7332092919119409047, + 15748242357100618434, + 2667394706391511758 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json new file mode 100644 index 000000000000..fe32152c3110 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json @@ -0,0 +1,244 @@ +{ + "L1MessagesHasher": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 66, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 26, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 132096, + "public_inputs_locations": [ + [ + 0, + 1038149 + ], + [ + 1, + 1038149 + ], + [ + 2, + 1038149 + ], + [ + 3, + 1038149 + ] + ], + "extra_constant_polys_for_selectors": 2, + "table_ids_column_idxes": [ + 6 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 14777139935278588112, + 4852642306346514505, + 528870644537653013, + 12766220607019407671 + ], + [ + 4257836937173457180, + 18105850810127810627, + 12045855835945477909, + 1337145016913030516 + ], + [ + 13540294178921617935, + 5675450379425866696, + 8080330723590348862, + 12416515377803888920 + ], + [ + 
3171578350856517770, + 6539655571602714350, + 17682924767985674977, + 8074611540701237863 + ], + [ + 14866967567212658098, + 14985810164396930899, + 14103564390721978582, + 2713291878303732148 + ], + [ + 7209698436584637628, + 72403128177350562, + 13748975409439240331, + 17101408191037730854 + ], + [ + 7094792714865445950, + 14145350607330203478, + 3322372571606796615, + 7791275147072878055 + ], + [ + 10260092656566629894, + 6872708783997532427, + 5457407604248314227, + 366003053747525096 + ], + [ + 6163187172733089710, + 15116272236856095840, + 8980783297696807334, + 4318634308458673791 + ], + [ + 22911656643808543, + 4389862417760095893, + 8180530007173246228, + 15363392102238906744 + ], + [ + 16724058906600359122, + 9749245991791698283, + 3733079220084897482, + 35144727903715636 + ], + [ + 1733024683910700810, + 16815568708094698990, + 9597261785243145371, + 14191876845225710581 + ], + [ + 3368783094877746336, + 10313180424218970297, + 7411576603144233838, + 18155104604678927944 + ], + [ + 15539244454544408034, + 14071575935246766022, + 3167686754143854069, + 2580957889210849319 + ], + [ + 11188593692389277627, + 3317111011441128346, + 18315606312625447776, + 14080235054242793975 + ], + [ + 11188480902959932408, + 16241470651544083095, + 17491552077640160913, + 1747401256351375709 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json new file mode 100644 index 000000000000..0d2da6f32cb7 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json @@ -0,0 +1,283 @@ +{ + "MainVM": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 8, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 68756, + "public_inputs_locations": [ + [ + 0, + 1041222 + ], + [ + 1, + 1041222 + ], + [ + 2, + 1041222 + ], + [ + 3, + 1041222 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + 
} + }, + "right": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 10, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 3951964484115607655, + 16829660660304901588, + 14211539760499261401, + 2472527790054023464 + ], + [ + 10602340213073929688, + 15269906034841934319, + 1999722205715604105, + 11671865217787642490 + ], + [ + 16987932594962878624, + 3285981047520267163, + 4222968438268808943, + 2575664426357529034 + ], + [ + 2918713921338525202, + 9601231618190155084, + 4500898479717158082, + 4495729168148349252 + ], + [ + 11155257299174723909, + 15132078140316221530, + 2851783895665737890, + 13286182309288546626 + ], + [ + 13156126079595134586, + 3129292503659656130, + 13013338048823100513, + 1046712685787755983 + ], + [ + 11100676529077504927, + 16645386371265872013, + 18436391699309598214, + 18316046327508256708 + ], + [ + 7941963338454253492, + 13992861888733776514, + 286345419062083924, + 7799721824685750420 + ], + [ + 10685589255052466830, + 14117977365602684661, + 9968846665194915012, + 2894934014962806033 + ], + [ + 14717979199076289807, + 11834986281925334505, + 9871430568597302240, + 6077792418098417085 + ], + [ + 13903843835636142856, + 13458907111742740634, + 3266506741999179349, + 6451325180775835720 + ], + [ + 1714065376768082879, + 267154899590020848, + 7203223479936126047, + 1425749651129432103 + ], + [ + 16369853181249865160, + 8023509533469270339, + 1080007288885554015, + 8528063843972547976 + ], + [ + 13901281976847998439, + 13482886048424426728, + 4714698685260429252, + 8450188099405888087 + ], + [ + 17148225032190900757, + 16136144457754963192, + 9054464804385647991, + 7309929357910934174 + ], + [ + 15279738581003263809, + 3233028670055847880, + 9675016189592150706, + 882792948810468198 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json new file mode 100644 index 000000000000..afbdd17f87d3 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json @@ -0,0 +1,257 @@ +{ + "CodeDecommittmentsSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 18, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 1021854 + ], + [ + 1, + 1021854 + ], + [ + 2, + 1021854 + ], + [ + 3, + 1021854 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + 
} + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 2638073007663622156, + 8095790926675075575, + 16070536450025430297, + 11107879245782883310 + ], + [ + 14146741033954484667, + 6190536674348638720, + 16225788979445059477, + 14054620090462985529 + ], + [ + 11640224008014629596, + 641539748496027160, + 13808722951176221096, + 16170765986761751839 + ], + [ + 3935980412801468150, + 1369763633048581729, + 15164038222707237449, + 13549026317001505493 + ], + [ + 7347140194150198874, + 3761583621533582182, + 1201042008705759557, + 4518814071203771589 + ], + [ + 800427219378311884, + 9408589372717347086, + 4254572946942417329, + 5142794058426597251 + ], + [ + 9025763675471789857, + 9658241200006349915, + 10843576536878471228, + 4504613934156851017 + ], + [ + 924391528635837029, + 17275471398483292983, + 7119295641875104852, + 3574531397848859770 + ], + [ + 9377840526717456169, + 10735342053764638034, + 2342156236435128394, + 14166002014472046096 + ], + [ + 2892383637971079443, + 13418647945623595756, + 10019182992393923816, + 9844763621346094605 + ], + [ + 10882982703274329811, + 1514425380968646350, + 13439208364741860903, + 13990068349260696136 + ], + [ + 15895812818511549818, + 15738749976988188006, + 13440084002922282596, + 14578356625798184093 + ], + [ + 3859406845557969736, + 17314298659359090415, + 16770924942850282883, + 486597592063200525 + ], + [ + 11378407834848513159, + 4967859104549187166, + 13937264085276400573, + 7478354099484226349 + ], + [ + 1449906124962973794, + 5408228139111124399, + 1658036384062801904, + 7066463570538863033 + ], + [ + 15186027246389802614, + 9949859568958827686, + 11971923963356626879, + 15735564656222075589 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json new file mode 100644 index 000000000000..0cfb70f82924 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json @@ -0,0 +1,244 @@ +{ + "CodeDecommitter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 108, + 
"num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 4, + "num_repetitions": 11, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 12320, + "public_inputs_locations": [ + [ + 0, + 1045893 + ], + [ + 1, + 1045893 + ], + [ + 2, + 1045893 + ], + [ + 3, + 1045893 + ] + ], + "extra_constant_polys_for_selectors": 2, + "table_ids_column_idxes": [ + 6 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 14477188964753033185, + 8969677320239131616, + 1511337688601558079, + 6903237435877294238 + ], + [ + 6879979662383013500, + 9972710512945599417, + 1141938191658961290, + 7985281381511529082 + ], + [ + 17209416762295781376, + 598939655522077579, + 6689912638469680235, + 4921519662278829132 + ], + [ + 2218810108211543567, + 17367409571577782381, + 4068453030111399481, + 2894111853945751344 + ], + [ + 9999042349572898395, + 7429179575907305306, + 10123408942776369379, + 3022715221462077728 + ], + [ + 10045842633239015513, + 4244812848324665170, + 12301343603596417356, + 11332920712778059030 + ], + [ + 15900139291770141663, + 8192446346506891091, + 10086891539583546802, + 7343942987745068197 + ], + [ + 6124221718954912549, + 13486682166896696529, + 15097291952143481844, + 16653894364467704495 + ], + [ + 12766623698334678967, + 1729058559883227397, + 1411108054906351423, + 13278453333171202065 + ], + [ + 12233418151438626108, + 14016138745865492456, + 13255147568691004416, + 14998854132551828470 + ], + [ + 10323923076292169703, + 8158278707949376146, + 12845614783152862914, + 5914093648720582597 + ], + [ + 13520835009196520971, + 14417779140547238889, + 6862603050786324034, + 10245030009169430808 + ], + [ + 1835499986105723876, + 9973301486190772269, + 3431085138170097359, + 16617926458565371046 + ], + [ + 6995430833584764582, + 10186803315798237521, + 13404931797112939412, + 17530795913574984460 + ], + [ + 10883424944588923206, + 13314595728239865895, + 3282096066350298749, + 3956046981299225896 + ], + [ + 12087054656445457911, + 7314398367646261307, + 
7998118142061675046, + 11673364943123337175 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json new file mode 100644 index 000000000000..a4a410d90a26 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json @@ -0,0 +1,257 @@ +{ + "LogDemuxer": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 136, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 14, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 770856 + ], + [ + 1, + 770856 + ], + [ + 2, + 770856 + ], + [ + 3, + 770856 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 13241330279191725103, + 13753032629530173038, + 2076223530664948596, + 3297807117114706044 + ], + [ + 11713836288112670191, + 1177996762465404370, + 7559895083149599749, + 3210974432904624409 + ], + [ + 12556775438783409088, + 12836668195118427958, + 10659579382288992879, + 14824820023358081641 + ], + [ + 12129434367710731467, + 13223863980858698592, + 15076313205959171338, + 8812832758598326992 + ], + [ + 5619695298194316584, + 1702543119292958822, + 10286311332797928654, + 5029271658667181176 + ], + [ + 7415141098448981547, + 15663039494509354932, + 13208197120197557194, + 11245742858683836013 + ], + [ + 3002540241136310707, + 11547797899694244403, + 7124622061839424949, + 10949013563713078494 + ], + [ + 17142575575809782204, + 13800993532867337554, + 4423537342426483807, + 12089179399318945120 + ], + [ + 5543363940431137493, + 14528536317911082899, + 
3928220692870214567, + 7185369207264833028 + ], + [ + 2815159846192152478, + 16507211682718130921, + 1793329775903937916, + 6473686931817864950 + ], + [ + 17815165628195346102, + 9542948826192641186, + 14973284068738873799, + 13577641628730921985 + ], + [ + 17938393397553240876, + 15660715751237780491, + 12630446844016399148, + 11862059154139259048 + ], + [ + 11953996319846633859, + 12131238563851642562, + 5803319004748576191, + 10988868046472383675 + ], + [ + 3859400868090135128, + 15214844687221204138, + 13973059553580269639, + 7853383910131759805 + ], + [ + 11592486898864810791, + 4871056958970591747, + 137946356858301988, + 14529417267976359973 + ], + [ + 11093343120608557204, + 14684319039324015274, + 5221221840195929029, + 17478918223903237221 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json new file mode 100644 index 000000000000..f7ac8502f3ad --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json @@ -0,0 +1,244 @@ +{ + "KeccakRoundFunction": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 86, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 14, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 132096, + "public_inputs_locations": [ + [ + 0, + 1047292 + ], + [ + 1, + 1047292 + ], + [ + 2, + 1047292 + ], + [ + 3, + 1047292 + ] + ], + "extra_constant_polys_for_selectors": 2, + "table_ids_column_idxes": [ + 6 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 9025410449149543081, + 2150978872708352380, + 11929541854331362678, + 10889802961463093007 + ], + [ + 14367653170042876779, + 15711919146034996716, + 10192530351300036212, + 17779266089181903535 + ], + [ + 16518023242439166042, + 16125234777789979115, + 3731170852541886643, + 6478513303454611533 + ], + [ + 
18022090193594520460, + 9824039918773778848, + 8099323107578446833, + 14138481655991300874 + ], + [ + 9646131916658144639, + 14765462438355160604, + 12353948730635165989, + 17374238707731963259 + ], + [ + 6466098066822358798, + 3802784570329552578, + 11192384635627240892, + 16889566382350703339 + ], + [ + 13295229914781218631, + 11477715700480687057, + 1029809241419010036, + 17026448985101402834 + ], + [ + 2928603244677043291, + 2590454321011930112, + 16594893027153225789, + 17268049387874967289 + ], + [ + 5231897347421383206, + 3542534855630287592, + 15172142009555909931, + 1424027296261247931 + ], + [ + 6943787726298694042, + 15335886870449394305, + 14785428951904960648, + 11215936320351406370 + ], + [ + 11447524278789270182, + 14266446056724893962, + 10914488308431466718, + 7364502792097837348 + ], + [ + 11359545309848431234, + 4980893295986349022, + 11473702556031439650, + 17861564638231497628 + ], + [ + 17663843964156179007, + 14833488899297277996, + 5714793925932097698, + 6902306052141283285 + ], + [ + 10270525424019036326, + 1923061535861034720, + 16424397298429761441, + 7171630776964282144 + ], + [ + 10524076026990794794, + 15223680225877637426, + 396032395140092130, + 7923171480236200520 + ], + [ + 6233273405562217643, + 4452358004773676392, + 6128591467452883036, + 3468440652866645203 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json new file mode 100644 index 000000000000..e46c80f82e2f --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json @@ -0,0 +1,244 @@ +{ + "Sha256RoundFunction": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 116, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 4, + "num_repetitions": 9, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 12320, + "public_inputs_locations": [ + [ + 0, + 1039793 + ], + [ + 1, + 1039793 + ], + [ + 2, + 1039793 + ], + [ + 3, + 1039793 + ] + ], + "extra_constant_polys_for_selectors": 2, + "table_ids_column_idxes": [ + 6 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + 
"right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 8587183264607820083, + 7501401150925504063, + 602581967238805636, + 11668946895593393939 + ], + [ + 2157265850227756788, + 11032761412102625645, + 3754093130785871858, + 18006602563614829680 + ], + [ + 10655937569249931504, + 126923738105413051, + 7841305508214111486, + 8495811533395543706 + ], + [ + 4193341982694972421, + 11921991451443354914, + 6997364257885731873, + 17667686448643761264 + ], + [ + 9540865900297042782, + 6139586301331019438, + 6145465934548908233, + 7114335385952641784 + ], + [ + 15094840473303346456, + 7747511060131015936, + 12772480149916714485, + 18349138296645060984 + ], + [ + 12531028814399847357, + 7203517905344132405, + 3061962363713004033, + 17452361121681943835 + ], + [ + 3835698399746542066, + 8245837273981884818, + 11550626417605245298, + 10420313830541187114 + ], + [ + 12555999497449784434, + 1714733525282428144, + 17219304496440144341, + 1381616758422006774 + ], + [ + 9988238398674725191, + 10677391130703045133, + 11464212366701391798, + 273721172960421145 + ], + [ + 16336643358125081536, + 9704885119523966469, + 12504901168607854020, + 1365225079498514628 + ], + [ + 5689583869880509287, + 884372117462576406, + 12127613936064786875, + 11036164135756420898 + ], + [ + 3756438513920537389, + 16008730255465618263, + 5676153503855975547, + 8859399175348528504 + ], + [ + 13012737189692792855, + 1923486022173657097, + 13451763503173292053, + 18260610382109438664 + ], + [ + 7568548647776403884, + 15888201829935950536, + 14213035939781028448, + 557143869736885619 + ], + [ + 32671974711448889, + 2434480108628229517, + 1058613992685145857, + 12709975455363119775 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json new file mode 100644 index 000000000000..b905a476ea43 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json @@ -0,0 +1,257 @@ +{ + "ECRecover": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 80, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 16, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 132096, + "public_inputs_locations": [ + [ + 0, + 872841 + ], + [ + 1, + 872841 + ], + [ + 2, + 872841 + ], + [ + 3, + 872841 + ] + ], + "extra_constant_polys_for_selectors": 2, + "table_ids_column_idxes": [ + 6 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + 
"degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 13818450912197620420, + 5079205692118648775, + 14615787041360044769, + 2941606671776647183 + ], + [ + 6715253104770723417, + 3160280029457127352, + 11108406980823166906, + 15487865556610611893 + ], + [ + 14039903923831613967, + 15298198763143829103, + 17031409250405123985, + 10266023324667771113 + ], + [ + 17366151300788544369, + 13314676565834570017, + 17521241757753748935, + 13066688955830816807 + ], + [ + 14445090483790969730, + 15708367780098206326, + 2336844413511710318, + 3268235585540529265 + ], + [ + 2882405134850480170, + 14247534382965114291, + 17531255653612736614, + 11676635700695125188 + ], + [ + 11530141675448575062, + 8910365257612403024, + 300072654586353643, + 8472188536913229506 + ], + [ + 1426612518547638168, + 17806679375517512145, + 14835333334022265221, + 2007845272495904476 + ], + [ + 6034343869761808836, + 13937750910508416181, + 16942548919853718543, + 16086518391257789831 + ], + [ + 15933462173546075175, + 8612525819877657624, + 4132383244121115701, + 9288543398092863864 + ], + [ + 8157130847726661070, + 4231891352218163681, + 14620351586778336684, + 4186724240746204294 + ], + [ + 7440132245224537493, + 6666895991749911132, + 8404993517441732468, + 6556569653095950475 + ], + [ + 1982595939619922877, + 17561202624392859313, + 14381497498171193805, + 17908865555917026633 + ], + [ + 7384278864004035589, + 10191778068274570585, + 6103937442735162958, + 5142419559331404710 + ], + [ + 3651117166359200686, + 3827322296271305097, + 14799462710376656576, + 13600220646083181205 + ], + [ + 1989104086172888026, + 7796359126421144184, + 16967575681666150511, + 5993683835612332048 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json new file mode 100644 index 000000000000..01b957e7a611 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json @@ -0,0 +1,257 @@ +{ + "RAMPermutation": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 133, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 15, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 1044095 + ], + [ + 1, + 1044095 + ], + [ + 2, + 1044095 + ], + [ + 3, + 1044095 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + 
], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 7748855058771730961, + 18077946489631649497, + 1126488644057748066, + 14688059039599644438 + ], + [ + 4480629341804348299, + 10505662440791981234, + 4568412032951786787, + 12296506456394181969 + ], + [ + 16177866372671364827, + 6970256790084749443, + 10619139891136255069, + 1607793233799494191 + ], + [ + 16984252104671889635, + 13549428959009290270, + 18134611582044419523, + 13805480879905126881 + ], + [ + 17770436976754840017, + 7234588192906938750, + 1676460085700470353, + 17733573771328390126 + ], + [ + 1322939182961086562, + 5294941824911180446, + 10983825026212251207, + 4904636572110590284 + ], + [ + 12784739321844360991, + 12439305138735676805, + 14983461304040938818, + 17269069332772868104 + ], + [ + 14780190734158735021, + 13940544738219743565, + 6645149114623433718, + 13466406487834863255 + ], + [ + 13329778603033226548, + 10757456562158453823, + 16599667503315631352, + 7621238797658185159 + ], + [ + 14547407989101566794, + 13324264894451648565, + 16566710504362716031, + 4779331080355111127 + ], + [ + 6132579229855214454, + 17610416320024829323, + 12304246579944377351, + 9688211256511656964 + ], + [ + 8981542755583161308, + 5091565442848149167, + 13934425064181076259, + 9294930870454289441 + ], + [ + 7427098481125065729, + 13578369070049130481, + 11513105383705002933, + 9750527547580548099 + ], + [ + 5745702296484372803, + 17242736621178757499, + 11421559995636138498, + 12684122852092168791 + ], + [ + 1002992144601037215, + 16187923653560782188, + 5293022176068028122, + 9959247706453715838 + ], + [ + 4182061746333368731, + 5244109339200264013, + 10015150430260308263, + 11549298210681275420 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json new file mode 100644 index 000000000000..59040a91d3e6 --- /dev/null +++ 
b/core/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json @@ -0,0 +1,257 @@ +{ + "StorageSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 132, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 1, + "num_repetitions": 16, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 256, + "public_inputs_locations": [ + [ + 0, + 1044628 + ], + [ + 1, + 1044628 + ], + [ + 2, + 1044628 + ], + [ + 3, + 1044628 + ] + ], + "extra_constant_polys_for_selectors": 3, + "table_ids_column_idxes": [ + 7 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 18353038824771834181, + 6180039936290620596, + 9566046629535886545, + 1194749282273701730 + ], + [ + 25931497575710606, + 4939580180726571178, + 9971692296422121050, + 10321682835888101865 + ], + [ + 14476617363912627618, + 18433740955496184028, + 1866402502799305529, + 13630516779228168836 + ], + [ + 5297717495027830927, + 10843206047833997321, + 12103660641452100213, + 962276586389403171 + ], + [ + 18191621713764018306, + 18034247738724721784, + 17062774240988584015, + 4793091222754364718 + ], + [ + 11975292609810709594, + 3410068686607534834, + 7176581702346144340, + 15010624823757225375 + ], + [ + 4386781545910081212, + 12096044536926128763, + 5099067130359909936, + 4702254698651040446 + ], + [ + 4564824446576585585, + 10282250482097501467, + 3576706676505948505, + 8070857080847133156 + ], + [ + 4021025381485058227, + 15830498207667536258, + 11765654863279694638, + 8603645468978049764 + ], + [ + 12131291478449851192, + 884696930963928594, + 5603953053234603685, + 15160539006841845005 + ], + [ + 137229235654780143, + 1982984178531442102, + 8969623252708511178, + 2605510294647382796 + ], + [ + 117138863215117992, + 11155889783333849588, + 11385131969922449424, + 2463919755275823513 + 
], + [ + 3448822069412323905, + 6853611764127119403, + 1354074653325845412, + 10520097888465643033 + ], + [ + 12126792712339142861, + 3208769323001970463, + 8541345094141085129, + 5739333931443919780 + ], + [ + 6375726425445642922, + 15388895865216583137, + 7439749375147960286, + 16154657507801467365 + ], + [ + 5187536080858526742, + 8938762330808016184, + 441459701363466307, + 11617235719453000530 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json new file mode 100644 index 000000000000..82e2d2f2fb54 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForRAMPermutation": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1125289536020813216, + 13893991227523403350, + 18221032481002623145, + 6999555513372134161 + ], + [ + 14558032258404044879, + 15896302942503207712, + 4320679543001532335, + 16312301655514654219 + ], + [ + 7101986692613628904, + 7577883870349313041, + 7352728228661094923, + 18332380179278822986 + ], + [ + 4857477437658850102, + 2600358150758031758, + 11245333823831173537, + 8338725066623873242 + ], + [ + 7533080307752291488, + 7286216489335488511, + 
18156637335160778785, + 7462498443331890731 + ], + [ + 606568432443359176, + 8912183283992686330, + 17421481837200753913, + 17592999343458504164 + ], + [ + 13072668834394870334, + 11175441683787645540, + 3718467031089360132, + 6303569707785751909 + ], + [ + 15139014418351999292, + 13433960894156419831, + 1081036147938149073, + 5537900067858640688 + ], + [ + 16144198516884069513, + 11760722486204114604, + 9080477633162807038, + 14878319203527003921 + ], + [ + 9887232148319546846, + 11280977977331453386, + 1634486104168251049, + 1013174085024142997 + ], + [ + 8774759106642276381, + 17014116512461272516, + 5017632137039687644, + 2879573590247199312 + ], + [ + 8532316813139433929, + 10192336124962558528, + 10208988044571331050, + 7412443809890180963 + ], + [ + 1940771445624788955, + 15990599983917575017, + 12383682653785412359, + 7243892390926482974 + ], + [ + 15783323653576062669, + 7433660384180142428, + 11341821314666985051, + 13908042579613943595 + ], + [ + 6784650697753378650, + 2429262522610065724, + 3770879433095160288, + 6633370836632857456 + ], + [ + 18435367235881428398, + 13152985860267484403, + 17561012172979073263, + 15335033836397886699 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json new file mode 100644 index 000000000000..14b20e4f718f --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForStorageSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + 
"needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 14038008090973880203, + 7437991399998284269, + 12502661164686015780, + 6154468052477899229 + ], + [ + 4890279821462461425, + 12267605659025997985, + 16220814561210675069, + 14958418982789304352 + ], + [ + 16239014851914932689, + 11626285279514581577, + 7827403995053907198, + 3320808129263057989 + ], + [ + 9987109148114223767, + 16027730699082584407, + 16226544327370131567, + 10505655809748447851 + ], + [ + 2426355028560688438, + 13015409833156179441, + 3357212938175132730, + 9924499217906835800 + ], + [ + 7264556678081366657, + 11014021481903289086, + 1185381295776166890, + 11220095453847753366 + ], + [ + 10738220050569983945, + 2071033793611953608, + 2836853848682426199, + 18280211532291996343 + ], + [ + 4622574899935206725, + 10283505057353003539, + 10924169390994336784, + 9267200805799259741 + ], + [ + 4991426063445236730, + 292198960832094512, + 6370230421874009175, + 2987533577516974457 + ], + [ + 15100014620403370288, + 17064710328307274600, + 13596338039199898149, + 7844302147920229272 + ], + [ + 6997319402399846472, + 5312486909661565204, + 8133503726683094273, + 14376435888676319871 + ], + [ + 16536431163453527335, + 8329243612205528007, + 10332326446350256878, + 6187024786825219302 + ], + [ + 15819705933365601754, + 17218893784817004570, + 7197154299986843671, + 11662127518680895562 + ], + [ + 12757050724806983838, + 14916998582501427105, + 2903621530266216761, + 12948020673936426635 + ], + [ + 14563493065638885359, + 6770003101729110728, + 11839394563403429402, + 1065983546047670743 + ], + [ + 2845847955135199124, + 16066115065717446946, + 4482870472147946913, + 8664518745998140088 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json new file mode 100644 index 000000000000..047a79433899 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForStorageApplication": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + 
"right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 17762498852322133081, + 12402705633936516386, + 2303979245623416237, + 15492962941180331409 + ], + [ + 1368919680062481249, + 8419725427681044227, + 12407005054982229959, + 9729546646519366060 + ], + [ + 3694696222464991628, + 10691747603876514511, + 5648287760852638838, + 15128008410193030270 + ], + [ + 5647849659158863308, + 16391316755630265342, + 17483459471194878342, + 2382689231083026500 + ], + [ + 414523452897415096, + 14712743039552404085, + 14274376366377496980, + 1540457029378813951 + ], + [ + 6437956396547385520, + 10457280544359552653, + 210288303177892964, + 7009065088863365256 + ], + [ + 6189643588169700860, + 2874522095144611328, + 3459596951253545261, + 14912093041250189548 + ], + [ + 2954035721997683722, + 2628438295425873126, + 9361498414301919378, + 7780135632218518403 + ], + [ + 13376229283479650476, + 13646160168852625209, + 12342809006526169374, + 16140909717103038788 + ], + [ + 14544916717622160085, + 2335857756498039096, + 12834512355397127233, + 8257858357688008275 + ], + [ + 13637749549385428585, + 1568326361689976373, + 14573670474737748882, + 8002611813857126901 + ], + [ + 4981475697544147574, + 7477162419770815721, + 13420952345288491036, + 6849943909220872064 + ], + [ + 5645683284474222575, + 10480504810673180938, + 7038844793157124351, + 10701205261596194736 + ], + [ + 2992787956816905753, + 10666728141278334493, + 4748213040479579674, + 13258093297981567423 + ], + [ + 11477426903799919629, + 24925561182649344, + 11412223773538266154, + 2852175545463505023 + ], + [ + 1060175052523024730, + 6610510112497451814, + 15229121744185849414, + 12773820515972201248 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json new file mode 100644 index 000000000000..9b8c7bca3296 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForEventsSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + 
"quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1107277819469948429, + 7779138633457495557, + 9933991506239962590, + 13247997120867942045 + ], + [ + 10020877950065961527, + 6525352303263078852, + 10601857031603992424, + 6699333963081308583 + ], + [ + 8019436446207312706, + 17880026081547931046, + 14023459613581442883, + 12177363081042438182 + ], + [ + 17643512238638359026, + 2759065364120570462, + 1113452962298378930, + 9944550331137276877 + ], + [ + 6208699382898547395, + 9442366708032685349, + 9362620233586526034, + 6406469355002722194 + ], + [ + 17265154700194893264, + 11486849446382907011, + 1331827641678752332, + 13890193454573854721 + ], + [ + 7338198937132638061, + 9619578268260381257, + 16966504852427653624, + 5042032213830518832 + ], + [ + 9998014800451912206, + 2764915420573986646, + 12638108373731502079, + 13849566240043998295 + ], + [ + 18402224478111895268, + 10245397321907314013, + 15810832121998678624, + 16050833323870358750 + ], + [ + 5754119484130347551, + 1334330314055286585, + 1196783225751134982, + 13693638204576454858 + ], + [ + 7476283313073466871, + 3327838189135133206, + 7576584001149251522, + 4746763672176501097 + ], + [ + 8341294580974175099, + 6996214973372400649, + 2825261487886819108, + 17611476352036968111 + ], + [ + 6481216673139681707, + 12834349834818063790, + 14423475559705119809, + 15943814042360079510 + ], + [ + 7771500178827314392, + 5968639878444939173, + 18006309838458312166, + 368714734303788414 + ], + [ + 2137428658614683231, + 4604901863694850124, + 3581156028309568037, + 7485386108131533730 + ], + [ + 1078544443818230878, + 14117476483719501663, + 17985826373971579789, + 10600652728062682193 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json 
b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json new file mode 100644 index 000000000000..e32be9870e7b --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForL1MessagesSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1107277819469948429, + 7779138633457495557, + 9933991506239962590, + 13247997120867942045 + ], + [ + 10020877950065961527, + 6525352303263078852, + 10601857031603992424, + 6699333963081308583 + ], + [ + 8019436446207312706, + 17880026081547931046, + 14023459613581442883, + 12177363081042438182 + ], + [ + 17643512238638359026, + 2759065364120570462, + 1113452962298378930, + 9944550331137276877 + ], + [ + 6208699382898547395, + 9442366708032685349, + 9362620233586526034, + 6406469355002722194 + ], + [ + 17265154700194893264, + 11486849446382907011, + 1331827641678752332, + 13890193454573854721 + ], + [ + 7338198937132638061, + 9619578268260381257, + 16966504852427653624, + 5042032213830518832 + ], + [ + 9998014800451912206, + 2764915420573986646, + 12638108373731502079, + 13849566240043998295 + ], + [ + 18402224478111895268, + 10245397321907314013, + 15810832121998678624, + 16050833323870358750 + ], + [ + 5754119484130347551, + 1334330314055286585, + 1196783225751134982, + 13693638204576454858 
+ ], + [ + 7476283313073466871, + 3327838189135133206, + 7576584001149251522, + 4746763672176501097 + ], + [ + 8341294580974175099, + 6996214973372400649, + 2825261487886819108, + 17611476352036968111 + ], + [ + 6481216673139681707, + 12834349834818063790, + 14423475559705119809, + 15943814042360079510 + ], + [ + 7771500178827314392, + 5968639878444939173, + 18006309838458312166, + 368714734303788414 + ], + [ + 2137428658614683231, + 4604901863694850124, + 3581156028309568037, + 7485386108131533730 + ], + [ + 1078544443818230878, + 14117476483719501663, + 17985826373971579789, + 10600652728062682193 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json new file mode 100644 index 000000000000..9457eb00fb97 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForL1MessagesHasher": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 18238935086181014750, + 12673103801320172126, + 1807450351340584945, + 4080587540382410469 + ], + [ + 3576906271507691924, + 15842010882262104289, + 1545568012269070598, + 15019610257262428212 + ], + [ + 16552529329663272195, + 70143638148036568, + 
9441616425189858949, + 12576239326961652577 + ], + [ + 13378751877668829423, + 8821335076667849619, + 8787507195664458069, + 8033428383364368883 + ], + [ + 14859204728026468678, + 67528639960702832, + 12174200483518178527, + 14324674266854914755 + ], + [ + 9830165552717527013, + 2321461270838214863, + 9268724714979319202, + 9904762657753448069 + ], + [ + 14141058045407997705, + 17031147612244105327, + 12751542125666982456, + 17817764425153554681 + ], + [ + 14795807291665277125, + 12320949525745092193, + 5617160704961099, + 16219204181913320518 + ], + [ + 7773282231989156729, + 13990108174498859083, + 6307778800331536092, + 5637115465294994933 + ], + [ + 3720582507396745477, + 12235229471532413465, + 2832424082557414313, + 1295093033129086530 + ], + [ + 5238251184464937674, + 2468597264523797445, + 7200015202778095391, + 6285172799678453354 + ], + [ + 14592230848145258634, + 14635944054407782259, + 16328656124118469880, + 5673837317773168465 + ], + [ + 10220932976054066577, + 587071736468910470, + 18317195354162201630, + 4442910666147223606 + ], + [ + 6686416988414600368, + 14769819815353713716, + 7130058524252605584, + 9117426323287817862 + ], + [ + 9696785136959918927, + 10735699192129851744, + 4483660550392452518, + 16920055661791281465 + ], + [ + 6465118959707729559, + 15053655525644243783, + 11077790678846863387, + 377514359817848250 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json new file mode 100644 index 000000000000..228a0e9fe926 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_1_key.json @@ -0,0 +1,270 @@ +{ + "SchedulerCircuit": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 4, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 132096, + "public_inputs_locations": [ + [ + 0, + 993345 + ], + [ + 1, + 993345 + ], + [ + 2, + 993345 + ], + [ + 3, + 993345 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [ + 8 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + 
"needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 5188923951567823784, + 1839604164061896861, + 3760614143058722712, + 17614504340708244503 + ], + [ + 7889899638667026800, + 14244435798393850379, + 15230145556400915502, + 12762495992738055897 + ], + [ + 1590798346328722678, + 14143092007536417439, + 10480901561038728792, + 3201431705395147463 + ], + [ + 2780378477031897976, + 11901528146276690135, + 1343277030558816196, + 6658753207411088573 + ], + [ + 11039463659901501365, + 8235548863391687887, + 1033553352576624721, + 12882010447949399432 + ], + [ + 18078277235848158043, + 14794319235551634496, + 13982848369540832169, + 11146980369941489422 + ], + [ + 5423143341883663864, + 15258729611778297770, + 7733187200367671156, + 11434904591161598775 + ], + [ + 10914070908442174902, + 8055525792807466851, + 14391942428843610452, + 11749906933466154458 + ], + [ + 14580351359387308464, + 13254290427053014332, + 7257863927775762043, + 11078203905320069045 + ], + [ + 6123238811378029441, + 11756658038961859601, + 760000874907607862, + 678236515728235822 + ], + [ + 15657816790157674514, + 4104741954972330508, + 4150394799973679527, + 15124992265078810298 + ], + [ + 13825567788010925982, + 636544017935987097, + 2260460249587621344, + 10354042489703999934 + ], + [ + 12710868603685796297, + 91862114057079406, + 5614554900380483346, + 131393259919990755 + ], + [ + 13185811107579017595, + 1006028503100864020, + 2087984259170414019, + 6445764843889735797 + ], + [ + 10414938568348349467, + 15415934042755645234, + 11692038010863343064, + 2402843492027871760 + ], + [ + 17752536940710015241, + 14329244239886245722, + 16349180633511906354, + 2663305413222761702 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json new file mode 100644 index 000000000000..7865e106454e --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_2_key.json @@ -0,0 +1,262 @@ +{ + "NodeLayerCircuit": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + 
"Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 16110944391299715992, + 6257581196984129533, + 3238325178821009741, + 2344850491864189019 + ], + [ + 9070724167014080545, + 4270936005334206500, + 14011092173278602876, + 17233390044312666340 + ], + [ + 15882294806312417165, + 4574813182503183418, + 15374759504522847393, + 12609068726586761599 + ], + [ + 5081359810005150600, + 3073725930902770385, + 12151383627606620216, + 1678712612408922293 + ], + [ + 13389075440637154488, + 1394733244174774927, + 15897027408886080730, + 8756205416909346149 + ], + [ + 9635595243774498130, + 12944626865667316474, + 11443383015868895087, + 11271399114434241688 + ], + [ + 15730316965377191644, + 9302195093067483199, + 13013113029527355010, + 16107136888029757437 + ], + [ + 4376996761649023946, + 5151155327098069058, + 5052643273518683586, + 4214154406154441301 + ], + [ + 14323780220991293990, + 8193587898306996901, + 5671887774622993207, + 9546628649033002185 + ], + [ + 16523271232278987128, + 994857983084927437, + 14501829109938165419, + 9015660151307809950 + ], + [ + 1530238726285436995, + 6261885523422263637, + 11940153058268689285, + 15737357444014615384 + ], + [ + 2670341602838046451, + 10669331667080282584, + 16656965855764533819, + 13339778044433609883 + ], + [ + 17128805815986618686, + 18194734266790270296, + 5735422502154213482, + 10164141197176685232 + ], + [ + 2629176720116777217, + 6966722226648521547, + 2937669813272776408, + 2812827195714811672 + ], + [ + 6178870790111010071, + 10834984121929556338, + 2836091052290008872, + 1311164878771236983 + ], + [ + 7411275786539821863, + 3702190765468277039, + 18130480549896087952, + 5277641488054089382 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json new file mode 100644 index 000000000000..1242852599aa --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForMainVM": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + 
"max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 2430557199151730472, + 13851025833293931453, + 17414806893516082410, + 5061662678932524341 + ], + [ + 14805681278570572304, + 4387660327327875475, + 8111350906698296387, + 14076974072563325480 + ], + [ + 10148467155144800386, + 10828200166982881680, + 14976161870128776785, + 455207310829672888 + ], + [ + 1258747241285388938, + 14533915542892847175, + 11947483492490443559, + 18136160672641752159 + ], + [ + 18399548687624620221, + 1188113040663592329, + 14887047843403680931, + 14373371518617662284 + ], + [ + 8697036667032805923, + 6757156065833582242, + 6438944907880440651, + 4699569537038573259 + ], + [ + 17755443425518132182, + 6748052206085081881, + 12550413684321582429, + 13208184919188659814 + ], + [ + 6748673664527653571, + 14319837795061250020, + 8674881656449995647, + 186839425215983320 + ], + [ + 4611201077078896801, + 12165300337241989192, + 6834829805650716536, + 7389817613944450096 + ], + [ + 10116872626825123115, + 6146264092536253625, + 5929884222540147413, + 12657573273477702966 + ], + [ + 6925597909836314416, + 6304221625093437329, + 11202013801518338537, + 15296541511521458214 + ], + [ + 14920901110496128138, + 13336137971580002245, + 2301350809681932102, + 10816850357256930117 + ], + [ + 3712128035625350334, + 4798834377815226954, + 9689670095699838466, + 13955528595570927929 + ], + [ + 480086305820392172, + 9166809339791846490, + 6250535256378342593, + 18200236880144340041 + ], + [ + 17764897482986219512, + 
4500604943295237976, + 3430272853973605048, + 17227997223311301571 + ], + [ + 12693631692464428736, + 14060221248394451382, + 6315214478974430097, + 10534028391088917480 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json new file mode 100644 index 000000000000..e8b25fd5f245 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForCodeDecommittmentsSorter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1866437491015022712, + 11793636374252065717, + 2771461065434523690, + 14888818750197177871 + ], + [ + 13530099303626962147, + 15053516955824087922, + 12339234049539021204, + 9708862473063699060 + ], + [ + 11432132052297230557, + 6677170992284491097, + 6366885341898621463, + 8111143143511568092 + ], + [ + 9907106152447520228, + 6682147062594018467, + 10264912494418416112, + 15503628246857005809 + ], + [ + 17195185271365515391, + 13597952072744597251, + 17744684609835730837, + 2231158103010709548 + ], + [ + 14293262369681823328, + 13130511952565359928, + 10899311746723421149, + 13247944667340766269 + ], + [ + 13892335977334728116, + 8911034200951442707, + 9940381085909975496, + 
2442123831058139778 + ], + [ + 6225220793196790211, + 4712637343981148404, + 17195066106455293379, + 8613492331172308471 + ], + [ + 6909799331954538355, + 10338179227896084459, + 12127192147500716446, + 17400998769923799388 + ], + [ + 16539422822493187900, + 14101588151214983695, + 13891327598256492007, + 6120137922715167439 + ], + [ + 14993757510795074537, + 2243361897978774751, + 3014175478852553185, + 1107614745766341650 + ], + [ + 13868198230244075748, + 14568344587632252919, + 18167720887640456957, + 892660889500481924 + ], + [ + 17208474456800792292, + 12638116024924785718, + 17972572249167165358, + 14432332670537563027 + ], + [ + 16794312278798106244, + 18025850455584262724, + 9034611355178459632, + 4812066730993316535 + ], + [ + 9019282623207016172, + 8465996543066345624, + 11891692540217379621, + 1309821012694343566 + ], + [ + 1009066940610956673, + 6090643896458703235, + 16512441752812232072, + 14910610346758346291 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json new file mode 100644 index 000000000000..eb327eed3dd1 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForCodeDecommitter": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + 
"fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 232279302667736671, + 6726532338542220941, + 13706010138770265797, + 519282525097925002 + ], + [ + 1103871324670382881, + 2908131767596043522, + 1184743003960864148, + 15387210180491180588 + ], + [ + 1835139914361735684, + 16415559350382398669, + 5395927063819619365, + 11718217787759145490 + ], + [ + 16896753956696589678, + 18311507677904418762, + 3337753834358040142, + 15261701009883534681 + ], + [ + 5146023192165443108, + 6435094416669057886, + 12102399260358768173, + 11345203084302025912 + ], + [ + 12317726061088124860, + 16542505080079874955, + 14545249352878185130, + 6198318878248226108 + ], + [ + 11741052063217712776, + 6746988457930817443, + 17049940702304400525, + 664483646520961959 + ], + [ + 16848268934698055336, + 15351522766275089309, + 3303427044017225869, + 8449387423137144953 + ], + [ + 3539943683510232958, + 9977830546935578537, + 14361154867928067261, + 18078907485257653963 + ], + [ + 9615907517852235498, + 4547984845394069068, + 1881087510325623488, + 8387507487023822878 + ], + [ + 4914791735672339571, + 2927646189877435594, + 8101987065768319522, + 11220909861720631116 + ], + [ + 12470368453784044761, + 11566657313839792570, + 8916441472890022081, + 2460153038592468216 + ], + [ + 11111897832305454757, + 16681613892385931738, + 11167212997482997212, + 12907774125001975406 + ], + [ + 12356110082580425887, + 2082693370541797346, + 6346996203748293162, + 13460912313801928 + ], + [ + 17583700199336254135, + 3213348565987316027, + 6373106379194368913, + 3269747122288195701 + ], + [ + 6235590918094214281, + 6461943464583505547, + 16473683422501694355, + 5297565830886346313 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json new file mode 100644 index 000000000000..fcfc585f123a --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForLogDemuxer": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + 
"degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1255474782692205032, + 16087518293005221749, + 16120433120690725791, + 13557959444835590881 + ], + [ + 16027822192018731390, + 8319846902607826113, + 11762447400221114192, + 8443748859905122045 + ], + [ + 14217444156040642299, + 11667925428120549150, + 8770581120601633705, + 13711220870168951809 + ], + [ + 7514565357525361228, + 17561543150251615679, + 3154909983341532730, + 12214081580202496570 + ], + [ + 12103863316007597160, + 3323941154151772169, + 3020605753288032659, + 13719536383629040140 + ], + [ + 5692457694309768505, + 2819674835255412986, + 762859630950656893, + 8641902833919071345 + ], + [ + 17873529730032253633, + 7201386304292118615, + 11501182428688354869, + 484571398574807569 + ], + [ + 14885817894337856307, + 6275077850611154396, + 11258872656630844770, + 3539429443980133849 + ], + [ + 15063387858351738900, + 4885324227361507661, + 11843813664335157415, + 12108718617943024927 + ], + [ + 5899829642851923448, + 12815217964596374101, + 5258792099613493578, + 3492836714462054208 + ], + [ + 9767772893712446038, + 9516937526725710003, + 533138889369363889, + 1960629141548643757 + ], + [ + 5192250756718034923, + 6205844331296290914, + 16547640844499692480, + 13348222714661177711 + ], + [ + 6744522815256114347, + 9303892902465539007, + 14440545534790765924, + 7421221195917428336 + ], + [ + 354635080958416363, + 15720855927808633651, + 885375182959288083, + 10459197185009191208 + ], + [ + 3742508711441291317, + 7193882150736289342, + 17760334643806787982, + 8575009527221694930 + ], + [ + 18274184058397159114, + 5200115837479315537, + 2808181877606937346, + 17946239285125192080 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json new file mode 100644 index 000000000000..1ab34e32a4f7 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForKeccakRoundFunction": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { 
+ "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 15278222994235313807, + 4647505541828109982, + 11601404244072907522, + 7495301362149670205 + ], + [ + 2294446454282967643, + 10852196555067806436, + 4676542110718751671, + 3650676510146080911 + ], + [ + 10036426682390389619, + 15410534417517518379, + 411748073143090898, + 1725429274294449186 + ], + [ + 10773139363930294963, + 14784009814759595952, + 4523828744129500622, + 14635565308295099932 + ], + [ + 11532260655451503527, + 2889442075290561580, + 7947536971337998641, + 9006850837384135593 + ], + [ + 18268520902352688907, + 17460815273130161567, + 5448683527846534560, + 16860223759333541117 + ], + [ + 8586752129609394016, + 17056726335999361043, + 13247832408825538184, + 10865075704067323346 + ], + [ + 4810539255563012829, + 3494541358111189199, + 7443746985302784339, + 1488118652209005646 + ], + [ + 13632843557374648899, + 11530787504038845899, + 8016420701220086345, + 2100494706314940875 + ], + [ + 12565007434827640436, + 2122488373912552994, + 7924677296826511433, + 4337201927455963919 + ], + [ + 9121346173552113908, + 8257616625819727572, + 1352571964050839537, + 1245015447612032209 + ], + [ + 5550331618999138407, + 15197131088442812142, + 17401528975137618793, + 7876503578710888777 + ], + [ + 10581471072917622415, + 11057977535360446233, + 4745650017347491925, + 16374614618217057484 + ], + [ + 15877663159259953297, + 13196700387970223678, + 987069829507588466, + 1239752961099076877 + ], + [ + 1564056242532596441, + 8225585740585112689, + 8013357208824893542, + 8291061420556283364 + ], + [ + 10408011788640723232, + 11035192730597666502, + 7808927156371652130, + 8373070655798680509 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json new file mode 100644 index 000000000000..53184d3b764a --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json @@ -0,0 +1,262 
@@ +{ + "LeafLayerCircuitForSha256RoundFunction": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 6606882135817124908, + 870347107746733688, + 12589677233751162485, + 589161009871845644 + ], + [ + 2653237880188520795, + 5593713591941028430, + 14924807074602279493, + 7403178895756596709 + ], + [ + 4770385125899202728, + 16848765286027915692, + 7130735721393145418, + 13542558858028383026 + ], + [ + 10198382868561538358, + 11182212222601267089, + 2158487448188796066, + 7515784380092212678 + ], + [ + 18043800703311929788, + 12605295159363639520, + 16963777812872271598, + 13934310487890398001 + ], + [ + 17306728193061605292, + 6162556196186301425, + 15123250614620584121, + 7156136428077702076 + ], + [ + 3239169487219227705, + 4415189033224694015, + 10092040104298268727, + 3953865385297495928 + ], + [ + 13842490303827572248, + 8581552410557417158, + 6306820342544224802, + 1525290694317383658 + ], + [ + 16571790197298227277, + 273370441868121439, + 7446891486292543124, + 5407600836394474442 + ], + [ + 11518012136298307119, + 15035338047379067034, + 11014561672957925556, + 9225054298465248935 + ], + [ + 11950255612043468638, + 10166628395020495040, + 5673010277307553197, + 3641423295115612757 + ], + [ + 1072894636907573868, + 10523520096472094653, + 4897453347544558657, + 3772162500249343132 + ], + [ + 17527297802619704973, + 
16260964196666506939, + 7653109999731571152, + 15253570761269944834 + ], + [ + 16258769312952303884, + 7720171109291562352, + 11124452352545828178, + 16830247676911180779 + ], + [ + 5288712429506529884, + 13145012711898589816, + 11490757447230521395, + 5486824582454772190 + ], + [ + 16641639521175638360, + 5677946044429642761, + 12635856058275795326, + 12340020456497165526 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json new file mode 100644 index 000000000000..88a48a0bf911 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json @@ -0,0 +1,262 @@ +{ + "LeafLayerCircuitForECRecover": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 1966688024276265163, + 1600999376577297955, + 9979283765343242481, + 10853158383047279373 + ], + [ + 9617115799973676416, + 1436692352837490106, + 16621229234254045212, + 17649471158808930813 + ], + [ + 10598997254576197179, + 6191890180530301291, + 485325547092687385, + 17866822217569560015 + ], + [ + 17529069959174406385, + 1822730242748867421, + 10607268541276403219, + 10369730414641253572 + ], + [ + 9559948904275293033, + 271393452476373483, + 10294727560225979037, + 13356808215545342022 + ], + [ 
+ 3330505141292591439, + 14604912162246460234, + 13747490798131143365, + 9686392462153294316 + ], + [ + 1308334442155460802, + 8411248012498029090, + 1727122243552046217, + 1891983150748887801 + ], + [ + 13628794098518472387, + 9775581327398472118, + 10952798350389999267, + 3791915693702783252 + ], + [ + 5150729729317744106, + 15268081752408833175, + 11313693800895322733, + 7645258866415024451 + ], + [ + 4492405884498997751, + 1462600329700613046, + 4494587633368393420, + 13835293745083269390 + ], + [ + 16786735218378765255, + 13489016634632055711, + 780880140016370703, + 1632417931049291348 + ], + [ + 15419598237747857050, + 17379853454459968259, + 1377883698753277247, + 17090368996477921986 + ], + [ + 5453156352466670830, + 7921752778252981104, + 15901693682958424795, + 7759079127470880643 + ], + [ + 13945928657949258565, + 10630556046992331796, + 5947903586431352857, + 13970701039664769056 + ], + [ + 11402992940883704805, + 14254801701412570920, + 16823021910688666954, + 16435058721419375579 + ], + [ + 1434897606543124534, + 7242596307416400095, + 1722748060955112357, + 1262887759339605102 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json new file mode 100644 index 000000000000..7865e106454e --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json @@ -0,0 +1,262 @@ +{ + "NodeLayerCircuit": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 140, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": "NoLookup", + "domain_size": 1048576, + "total_tables_len": 0, + "public_inputs_locations": [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 2, + 0 + ], + [ + 3, + 0 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + 
"gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 16110944391299715992, + 6257581196984129533, + 3238325178821009741, + 2344850491864189019 + ], + [ + 9070724167014080545, + 4270936005334206500, + 14011092173278602876, + 17233390044312666340 + ], + [ + 15882294806312417165, + 4574813182503183418, + 15374759504522847393, + 12609068726586761599 + ], + [ + 5081359810005150600, + 3073725930902770385, + 12151383627606620216, + 1678712612408922293 + ], + [ + 13389075440637154488, + 1394733244174774927, + 15897027408886080730, + 8756205416909346149 + ], + [ + 9635595243774498130, + 12944626865667316474, + 11443383015868895087, + 11271399114434241688 + ], + [ + 15730316965377191644, + 9302195093067483199, + 13013113029527355010, + 16107136888029757437 + ], + [ + 4376996761649023946, + 5151155327098069058, + 5052643273518683586, + 4214154406154441301 + ], + [ + 14323780220991293990, + 8193587898306996901, + 5671887774622993207, + 9546628649033002185 + ], + [ + 16523271232278987128, + 994857983084927437, + 14501829109938165419, + 9015660151307809950 + ], + [ + 1530238726285436995, + 6261885523422263637, + 11940153058268689285, + 15737357444014615384 + ], + [ + 2670341602838046451, + 10669331667080282584, + 16656965855764533819, + 13339778044433609883 + ], + [ + 17128805815986618686, + 18194734266790270296, + 5735422502154213482, + 10164141197176685232 + ], + [ + 2629176720116777217, + 6966722226648521547, + 2937669813272776408, + 2812827195714811672 + ], + [ + 6178870790111010071, + 10834984121929556338, + 2836091052290008872, + 1311164878771236983 + ], + [ + 7411275786539821863, + 3702190765468277039, + 18130480549896087952, + 5277641488054089382 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json new file mode 100644 index 000000000000..228a0e9fe926 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json @@ -0,0 +1,270 @@ +{ + "SchedulerCircuit": { + "fixed_parameters": { + "parameters": { + "num_columns_under_copy_permutation": 130, + "num_witness_columns": 0, + "num_constant_columns": 4, + "max_allowed_constraint_degree": 8 + }, + "lookup_parameters": { + "UseSpecializedColumnsWithTableIdAsConstant": { + "width": 3, + "num_repetitions": 4, + "share_table_id": true + } + }, + "domain_size": 1048576, + "total_tables_len": 132096, + "public_inputs_locations": [ + [ + 0, + 993345 + ], + [ + 1, + 993345 + ], + [ + 2, + 993345 + ], + [ + 3, + 993345 + ] + ], + "extra_constant_polys_for_selectors": 4, + "table_ids_column_idxes": [ + 8 + ], + "quotient_degree": 8, + "selectors_placement": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 1, + "num_constants": 0, + "degree": 7, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 4, + "num_constants": 4, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "Fork": { + "left": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 5, + "num_constants": 1, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 6, + "num_constants": 0, + "degree": 2, + "needs_selector": true, 
+ "is_lookup": false + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 2, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 7, + "num_constants": 0, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 9, + "num_constants": 4, + "degree": 2, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 0, + "num_constants": 4, + "degree": 1, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + }, + "right": { + "Fork": { + "left": { + "GateOnly": { + "gate_idx": 3, + "num_constants": 2, + "degree": 3, + "needs_selector": true, + "is_lookup": false + } + }, + "right": { + "GateOnly": { + "gate_idx": 8, + "num_constants": 0, + "degree": 0, + "needs_selector": true, + "is_lookup": false + } + } + } + } + } + } + } + }, + "fri_lde_factor": 2, + "cap_size": 16 + }, + "setup_merkle_tree_cap": [ + [ + 5188923951567823784, + 1839604164061896861, + 3760614143058722712, + 17614504340708244503 + ], + [ + 7889899638667026800, + 14244435798393850379, + 15230145556400915502, + 12762495992738055897 + ], + [ + 1590798346328722678, + 14143092007536417439, + 10480901561038728792, + 3201431705395147463 + ], + [ + 2780378477031897976, + 11901528146276690135, + 1343277030558816196, + 6658753207411088573 + ], + [ + 11039463659901501365, + 8235548863391687887, + 1033553352576624721, + 12882010447949399432 + ], + [ + 18078277235848158043, + 14794319235551634496, + 13982848369540832169, + 11146980369941489422 + ], + [ + 5423143341883663864, + 15258729611778297770, + 7733187200367671156, + 11434904591161598775 + ], + [ + 10914070908442174902, + 8055525792807466851, + 14391942428843610452, + 11749906933466154458 + ], + [ + 14580351359387308464, + 13254290427053014332, + 7257863927775762043, + 11078203905320069045 + ], + [ + 6123238811378029441, + 11756658038961859601, + 760000874907607862, + 678236515728235822 + ], + [ + 15657816790157674514, + 4104741954972330508, + 4150394799973679527, + 15124992265078810298 + ], + [ + 13825567788010925982, + 636544017935987097, + 2260460249587621344, + 10354042489703999934 + ], + [ + 12710868603685796297, + 91862114057079406, + 5614554900380483346, + 131393259919990755 + ], + [ + 13185811107579017595, + 1006028503100864020, + 2087984259170414019, + 6445764843889735797 + ], + [ + 10414938568348349467, + 15415934042755645234, + 11692038010863343064, + 2402843492027871760 + ], + [ + 17752536940710015241, + 14329244239886245722, + 16349180633511906354, + 2663305413222761702 + ] + ] + } +} \ No newline at end of file diff --git a/core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json b/core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json new file mode 100644 index 000000000000..98857952935b --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/data/witness_artifacts.json @@ -0,0 +1,22 @@ +{ + "entry_point_address": "0xc54E30ABB6a3eeD1b9DC0494D90c9C22D76FbA7e", + "entry_point_code": 
"0x0004000000000002000200000000000200000000030100190000006003300270000002b70430019700030000004103550002000000010355000002b70030019d000100000000001f00000080010000390000004004000039000000000014043500000001012001900000006f0000c13d0000000002000031000000040120008c0000007a0000413d0000000201000367000000000101043b000000e001100270000002b90310009c000000290000c13d0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0ad708e90000040f000000270000013d000200000004001d000002ba0310009c000000810000613d000002bb0310009c0000009c0000613d000002bc0310009c000000be0000613d000002bd0310009c000000d80000613d000002be0310009c000000f30000613d000002bf0310009c0000010d0000613d000002c00310009c000002200000613d000002c10310009c000001460000613d000002c20210009c0000023b0000613d000002c30210009c000001580000613d000002c40210009c000001720000613d000002c50210009c000002550000613d000002c60210009c0000018c0000613d000002c70210009c000002780000613d000002c80210009c000001a20000613d000002c90210009c0000028e0000613d000002ca0210009c000001ba0000613d000002cb0210009c000001d20000613d000002cc0210009c000001ec0000613d000002cd0210009c000002060000613d000002ce0110009c000002d20000c13d0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000004010000390ad70ad50000040f00000002020000290000000003020433000002d00110019700000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000002b8030000410ad7039b0000040f000000000120004c000002d20000c13d0ad703ff0000040f0000000001000019000000000200001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000000010000190ad70ad50000040f00000002020000290000000003020433000002d00110019700000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000200310008c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000004010000390000000201100367000000000101043b0000000005010433000002b7010000410000000002000414000002b70320009c0000000001024019000000c001100210000002d4011001c70000800d020000390000000203000039000002d5040000410ad70ac90000040f0000000101200190000002d20000613d000002d50000013d0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000003010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041
000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000002010000390ad70ad50000040f00000002020000290000000003020433000000e00110021000000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000006010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000800310008c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d000000000100041400000002040003670000006402400370000000000602043b0000004402400370000000000502043b0000000402400370000000000302043b0000000002000410000000040720008c000002cd0000c13d0000001f0160018f0000000502600270000000000420004c000001360000613d0000000004000019000000050640021000000000076500190000000006630019000000000606043300000000006704350000000104400039000000000624004b0000012e0000413d000000000410004c000002d50000613d0000000502200210000000000323001900000000022500190000000301100210000000000402043300000000041401cf000000000414022f00000000030304330000010001100089000000000313022f00000000011301cf000000000141019f0000000000120435000002d50000013d000000040120008a000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0ad706e60000040f0000000001000019000000000200001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0000000a010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000008010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0ad70a100000040f0000000001000019000000000200001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000002010000290000000001010433000002d3020000410000000000210435000000200200003900000000030000190ad7039b0000040f0000000001000416000000000110004c000
002d20000c13d000000040100008a0000000001100031000002cf02000041000000400310008c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000002010003670000002402100370000000000202043b0000000401100370000000000101043b00000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000001010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0000000b010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000005010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000009010000390ad70ad50000040f00000002020000290000000003020433000002d00110019700000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000007010000390ad70ad50000040f0000000202000029000000000302043300000000001304350000002002000039000000000103001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000200310008c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000002010003670000000402100370000000000202043b000000000121034f000000000501043b000002b7010000410000000002000414000002b70320009c0000000001024019000000c001100210000002d4011001c70000800d020000390000000203000039000002d5040000410ad70ac90000040f0000000101200190000002d50000c13d000002d20000013d0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d0ad707770000040f0000000001000019000000000200001900000000030000190ad7039b0000040f0000000001000416000000000110004c000002d20000c13d000000040100008a0000000001100031000002cf02000041000000000310004c00000000030000190000000003024019000002cf01100197000000000410004c000000000200a019000002
cf0110009c00000000010300190000000001026019000000000110004c000002d20000c13d00000002010000290000000003010433000002d1010000410000000000130435000000000100041400000002020000390000000e04000039000000200600003900000000050000190ad7031f0000040f000000000110004c000002d90000c13d000000030200036700000001040000310000001f0340018f000000020100002900000000010104330000000504400270000000000540004c000002bc0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000002b40000413d000000000530004c000002cb0000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310ad703a50000040f0000002404400370000000000404043b0ad702ea0000040f000000000110004c000002d50000c13d000000000100001900000000020000190ad703a50000040f0000000001000019000000000200001900000000030000190ad7039b0000040f0000000001000433000002d20410016700000002030000290000000001030433000100000001001d000000400210003900000000004204350000002004100039000002d202000041000000000024043500000000003104350ad703ae0000040f00000001010000290ad709ab0000040f000000000100001900000000020000190ad703a50000040f0002000000000002000200000006001d000100000005001d000002b705000041000002b70630009c00000000030580190000004003300210000002b70640009c00000000040580190000006004400210000000000334019f000002b70410009c0000000001058019000000c001100210000000000113019f0ad70ac90000040f000000010800002900000002040000290000001f0340018f0000000504400270000000000540004c000003090000613d000000000500001900000005065002100000000007680019000000000661034f000000000606043b00000000006704350000000105500039000000000645004b000003010000413d000000010220018f000000000530004c000003190000613d0000000504400210000000000541034f00000000044800190000000303300210000000000604043300000000063601cf000000000636022f000000000505043b0000010003300089000000000535022f00000000033501cf000000000363019f000000000034043500030000000103550000006001100270000102b70010019d00000000010200190000000200000005000000000001042d0002000000000002000200000006001d000100000005001d000002b705000041000002b70630009c00000000030580190000004003300210000002b70640009c00000000040580190000006004400210000000000334019f000002b70410009c0000000001058019000000c001100210000000000113019f0ad70ace0000040f000000010800002900000002040000290000001f0340018f0000000504400270000000000540004c0000033e0000613d000000000500001900000005065002100000000007680019000000000661034f000000000606043b00000000006704350000000105500039000000000645004b000003360000413d000000010220018f000000000530004c0000034e0000613d0000000504400210000000000541034f00000000044800190000000303300210000000000604043300000000063601cf000000000636022f000000000505043b0000010003300089000000000535022f00000000033501cf000000000363019f000000000034043500030000000103550000006001100270000102b70010019d00000000010200190000000200000005000000000001042d000002b703000041000002b70410009c00000000010380190000004001100210000002b70420009c00000000020380190000006002200210000000000112019f0000000002000414000002b70420009c0000000002038019000000c002200210000000000112019f000002d4011001c700008010020000390ad70ace0000040f0000000102200190000003680000613d000000000101043b000000000001042d000000000100001900000000020000190ad703a50000040f0000000003010019000002b7010000410000000004000414000002b70540009c0000000001044019000000c00110021000000060022002100000000001120019000002d60110004100000000020300190ad70ace0000040f00000001022001900000037a0000613d000000000
101043b000000000001042d000000000100001900000000020000190ad703a50000040f0000004402100039000000000300041400000060040000390000000000420435000002d70200004100000000002104350000006402100039000000000002043500000004021000390000000000020435000002b702000041000002b70430009c0000000003028019000002b70410009c00000000010280190000004001100210000000c002300210000000000112019f000002d8011001c700008006020000390ad70ac90000040f0000000102200190000003960000613d000000000101043b0000039a0000013d00030000000103550000006001100270000102b70010019d0000000001000019000000000001042d000002b704000041000002b70510009c000000000104801900000040011002100000000001310019000002b70320009c00000000020480190000006002200210000000000121001900000ad80001042e000002b703000041000002b70420009c0000000002038019000002b70410009c000000000103801900000040011002100000006002200210000000000112019f00000ad900010430000002d90210009c000003b40000813d000000600110003900000040020000390000000000120435000000000001042d000002da010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f0000004002100039000002db030000410000000000320435000000200210003900000017030000390000000000320435000000200200003900000000002104350000006001100039000000000001042d0000006002100039000002dc0300004100000000003204350000004002100039000002dd030000410000000000320435000000200210003900000026030000390000000000320435000000200200003900000000002104350000008001100039000000000001042d0000004002100039000002de03000041000000000032043500000020021000390000002003000039000000000032043500000000003104350000006001100039000000000001042d0000006002100039000002df0300004100000000003204350000004002100039000002e0030000410000000000320435000000200210003900000022030000390000000000320435000000200200003900000000002104350000008001100039000000000001042d0000004002100039000002e103000041000000000032043500000020021000390000002003000039000000000032043500000000003104350000006001100039000000000001042d0000006002100039000002e20300004100000000003204350000004002100039000002e303000041000000000032043500000020021000390000002c030000390000000000320435000000200200003900000000002104350000008001100039000000000001042d00080000000000020000000001000412000002d0021001970000000001000410000800000002001d000000000112004b000005dc0000c13d0ad706e60000040f0ad707770000040f0000000c010000390ad70ad50000040f000500000001001d000000000110004c000005eb0000613d000002e60100004100000000001004390000000401000039000300000001001d00000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d0000004001000039000700000001001d0000000002010433000002e701000041000600000002001d000000000012043500000000010004140000000802000029000000040320008c000005ce0000613d00000004040000390000000603000029000400000004001d000000000503001900000000060000190ad702ea0000040f000000000110004c000005ce0000c13d0000000c010000390ad70ad50000040f0000000502000029000000000121004b000005fa0000c13d000002e6010000410000000000100439000000080200002900000004010000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002e9010000410000000000130435000000000100041400000004040000390000000802000029000600000004001d000500000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006090000c13d0ad70a100000040f00000000010004140000000003000416000000000230004c0000045d0000613d000002b702000041000002b70410009c0000000001028019000000c001100210000002d4011001c70000800902000039000002eb0400004100000000050000190ad70ac90000040f000000000301034f000000010120
018f000300000003035500000000020300190000006002200270000102b70020019d000002b702200197000004640000013d000002eb0200004100000000030000190000000004000019000000000500001900000000060000190ad702ea0000040f0000000102000031000000000320004c0000000809000029000004950000613d0000003f03200039000000200400008a000000000443016f000000070700002900000000030704330000000004430019000000000534004b00000000050000190000000105004039000002ea0640009c000006a30000213d0000000105500190000006a30000c13d000000000047043500000000002304350000002002300039000000030300036700000001050000310000001f0450018f0000000505500270000000000650004c000004860000613d000000000600001900000005076002100000000008720019000000000773034f000000000707043b00000000007804350000000106600039000000000756004b0000047e0000413d000000000640004c000004950000613d0000000505500210000000000353034f00000000025200190000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f0000000000320435000000000110004c000006170000613d0000000001000414000500000001001d000002e601000041000000000010043900000006010000290000000000910439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d000000070100002900000000030104330000002401300039000002b7020000410000000000210435000002ec0100004100000000001304350000000401300039000200000001001d00000000000104350000000501000029000027102110011a00000044040000390000000802000029000400000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006260000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d000000070100002900000000030104330000002401300039000000010200008a000400000002001d0000000000210435000002ec0100004100000000001304350000000401300039000100000001001d00000000000104350000000501000029000027102110011a00000044040000390000000802000029000200000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006350000c13d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002ed01000041000000000013043500000004043000390000000001000414000002b702000041000200000004001d000000000024043500000024040000390000000802000029000500000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006440000c13d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002ed01000041000000000013043500000004043000390000000001000414000002ee02000041000200000004001d000000000024043500000024040000390000000802000029000500000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006530000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002ef0100004100000000001304350000000402300039000002f001000041000100000002001d00000000001204350000000501000029000027102110011a00000024040000390000000802000029000200000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006620000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040
f000000000110004c000005cb0000613d00000007010000290000000003010433000002ef0100004100000000001304350000000402300039000002b701000041000100000002001d00000000001204350000000501000029000027102110011a00000024040000390000000802000029000200000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006710000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002ef01000041000000000013043500000004023000390000000401000029000100000002001d00000000001204350000000501000029000027102110011a00000024040000390000000802000029000200000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c000006800000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d000000070100002900000000030104330000006401300039000002f102000041000000000021043500000044013000390000000000210435000002f2010000410000000000130435000000240130003900000000000104350000000401300039000200000001001d00000000000104350000000501000029000027102110011a00000084040000390000000802000029000400000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c0000068f0000c13d0000000001000414000500000001001d000002e6010000410000000000100439000000060100002900000008020000290000000000210439000080020100003900000024020000390ad7036b0000040f000000000110004c000005cb0000613d00000007010000290000000003010433000002f20100004100000000001304350000002401300039000002f10200004100000000002104350000000401300039000400000001001d000000000021043500000064013000390000000000010435000000440130003900000000000104350000000501000029000027102110011a00000084040000390000000802000029000600000003001d000000000503001900000000060000190ad702ea0000040f000000000110004c0000069e0000c13d00000007010000290000000001010433000002f30210009c000006a30000213d0000002402100039000002f40300004100000000003204350ad7037d0000040f000000000210004c000006ab0000613d000002d005100197000000000150004c000006cd0000613d000002b7010000410000000002000414000002b70320009c0000000001024019000000c001100210000002d4011001c70000800d020000390000000303000039000002f50400004100000000060004110ad70ac90000040f0000000101200190000005cb0000613d0000000800000005000000000001042d000000000100001900000000020000190ad703a50000040f0000000601000029000002f80110009c0000000701000029000006a30000813d00000006020000290000000000210435000002e501000041000000000012043500000004012000390ad703bc0000040f0000000603000029000000000231004900000000010300190ad703a50000040f000000400100003900000000010104330000004402100039000002e4030000410000000000320435000000240210003900000010030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f000000400100003900000000010104330000004402100039000002f9030000410000000000320435000000240210003900000018030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f000000070100002900000000010104330000004402100039000002e803000041000000000032043500000024021000390000001b030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f0000000501000029000002ea0110009c0000000701000029000006a30000213d00000005020000290000000000210435000002e501000041000000000012043500000004012000390ad703c60000040f00
00000503000029000000000231004900000000010300190ad703a50000040f000000070100002900000000010104330000004402100039000002f7030000410000000000320435000000240210003900000014030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f00000002020000290000000401000029000002ea0110009c0000000701000029000006a30000213d00000004030000290000000000310435000002e501000041000000000013043500000000010200190ad703d30000040f0000000403000029000000000231004900000000010300190ad703a50000040f00000001020000290000000201000029000002ea0110009c0000000701000029000006a30000213d00000002030000290000000000310435000002e501000041000000000013043500000000010200190ad703d30000040f0000000203000029000000000231004900000000010300190ad703a50000040f00000002020000290000000501000029000002ea0110009c0000000701000029000006a30000213d00000005030000290000000000310435000002e501000041000000000013043500000000010200190ad703dc0000040f0000000503000029000000000231004900000000010300190ad703a50000040f00000002020000290000000501000029000002ea0110009c0000000701000029000006a30000213d00000005030000290000000000310435000002e501000041000000000013043500000000010200190ad703dc0000040f0000000503000029000000000231004900000000010300190ad703a50000040f00000001020000290000000201000029000002ea0110009c0000000701000029000006a30000213d00000002030000290000000000310435000002e501000041000000000013043500000000010200190ad703e90000040f0000000203000029000000000231004900000000010300190ad703a50000040f00000001020000290000000201000029000002ea0110009c0000000701000029000006a30000213d00000002030000290000000000310435000002e501000041000000000013043500000000010200190ad703e90000040f0000000203000029000000000231004900000000010300190ad703a50000040f00000001020000290000000201000029000002ea0110009c0000000701000029000006a30000213d00000002030000290000000000310435000002e501000041000000000013043500000000010200190ad703e90000040f0000000203000029000000000231004900000000010300190ad703a50000040f00000002020000290000000401000029000002ea0110009c0000000701000029000006a30000213d00000004030000290000000000310435000002e501000041000000000013043500000000010200190ad703f20000040f0000000403000029000000000231004900000000010300190ad703a50000040f00000004020000290000000601000029000002ea0110009c0000000701000029000006dc0000a13d000002da010000410000000000100435000000410100003900000003020000290000000000120435000000240200003900000000010000190ad703a50000040f000000030200036700000001040000310000001f0340018f000000070100002900000000010104330000000504400270000000000540004c000006bc0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000006b40000413d000000000530004c000006cb0000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310ad703a50000040f000000070100002900000000010104330000004402100039000002f6030000410000000000320435000000240210003900000019030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f00000006030000290000000000310435000002e501000041000000000013043500000000010200190ad703f20000040f0000000603000029000000000231004900000000010300190ad703a50000040f0004000000000002000002fa0100004100000000001004390000800b0100003900000004020000390ad7036b0000040f0000000002000411000000000121004b000007740000c13d0000000001000032000007740000c13d00000
2fb0100004100000000001004390000800b0100003900000004020000390ad7036b0000040f000000000110004c000007120000613d000002fb0100004100000000001004390000800b010000390000000402000039000100000002001d0ad7036b0000040f000000000110004c0000076c0000613d000002fb01000041000400000001001d00000000001004390000800b01000039000300000001001d0000000402000039000200000002001d0ad7036b0000040f00000004020000290000000000200439000400000001001d000000030100002900000002020000290ad7036b0000040f000003e8011000390000000402000029000000000112004b0000076c0000213d000002fc0100004100000000001004390000800b01000039000300000001001d0000000402000039000400000002001d0ad7036b0000040f0000000a020000390ad70ad30000040f000002fd010000410000000000100439000000030100002900000004020000290ad7036b0000040f000100000001001d0000000901000039000200000001001d0ad70ad50000040f0000000102000029000002d002200197000002fe01100197000000000121019f00000002020000290ad70ad30000040f000002ff010000410000000000100439000000030100002900000004020000290ad7036b0000040f00000008020000390ad70ad30000040f00000300010000410000000000100439000000030100002900000004020000290ad7036b0000040f00000007020000390ad70ad30000040f00000301010000410000000000100439000000030100002900000004020000290ad7036b0000040f00000006020000390ad70ad30000040f00000302010000410000000000100439000000030100002900000004020000290ad7036b0000040f00000003020000390ad70ad30000040f000002fa010000410000000000100439000000030100002900000004020000290ad7036b0000040f000200000001001d00000004010000290ad70ad50000040f0000000202000029000002d002200197000002fe01100197000000000121019f00000004020000290ad70ad30000040f0000000201000367000000000101043b000100000001001d0000000201000039000200000001001d0ad70ad50000040f0000000102000029000000e0022002700000030301100197000000000121019f00000002020000290ad70ad30000040f000002fb010000410000000000100439000000030100002900000004020000290ad7036b0000040f00000005020000390ad70ad30000040f00000000010004160000000b020000390ad70ad30000040f0000000400000005000000000001042d000002da010000410000000000100435000000110100003900000001020000290000000000120435000000240200003900000000010000190ad703a50000040f000000000100001900000000020000190ad703a50000040f000a0000000000020000000001000414000100000001001d0000004001000039000400000001001d0000000003010433000003040130009c000008640000813d000000a001300039000000040200002900000000001204350000008001300039000003050200004100000000002104350000006001300039000003060200004100000000002104350000004001300039000003070200004100000000002104350000002001300039000003080200004100000000002104350000007a0100003900000000001304350000000002000019000000030120008c000a00000003001d0000082d0000213d000200000002001d000000000203043300000020013000390ad703540000040f0000000a0700002900000004020000290000000003020433000900000001001d000000000100041400000000040704330000000002000019000000000542004b000007a70000813d000000000532001900000020022000390000000006720019000000000606043300000000006504350000079f0000013d000000000234001900000000000204350000000202000039000000200600003900000000050000190ad7031f0000040f000000000110004c0000086c0000613d00000000020004330000000a0800002900000000010804330000000403000029000000000703043300000020037000390000000004000019000000000514004b000007be0000813d00000000053400190000002004400039000000000684001900000000060604330000000000650435000007b60000013d0000000904000029000000000442013f0000000002310019000500000004001d0000000000420435000000200210003900000000002704350000005f01100039000000200200008a000000000221016f000300000007001d0000000001720019000000000221004b00000000020000190000000102004039000002ea0310009c000008640000213d00000001
02200190000008640000c13d000000040200002900000000001204350000000c01000039000a00000001001d0ad70ad50000040f0000000003010019000002ea0130009c000008640000213d00000001013000390000000a02000029000900000003001d0ad70ad30000040f0000000a01000029000000000010043500000009010000290000030c0210004100000005010000290ad70ad30000040f0000000a010000290ad70ad50000040f000000000210004c0000085c0000613d000000010110008a000000000210004c000008190000613d000a00000001001d0ad708b00000040f000800000002001d0ad70ad50000040f000600000001001d0000000a01000029000000010110008a0000000101100270000900000001001d0ad708b00000040f000700000002001d0ad70ad50000040f00000007020000290000000302200210000000000121022f000000ff0220008c000000000100201900000008020000290000000302200210000000ff0320008c0000000a03000029000008190000213d0000000604000029000000000224022f000000000112004b000008190000a13d00000009010000290ad708b00000040f000800000002001d0ad70ad50000040f000700000001001d0000000a010000290ad708b00000040f000000080300002900000003033002100000000704000029000000000434022f000000ff0330008c000000000304001900000000030020190ad708c60000040f00000009010000290ad708b00000040f00000005030000290ad708c60000040f0000000901000029000007e80000013d0000000c010000390ad70ad50000040f000000000210004c0000085c0000613d000000010210008a0000000001210170000008270000c13d0000000d010000390ad70ad50000040f000002ea01100197000002ea0210009c0000085c0000613d00000001011000390ad708dc0000040f00000003010000290ad709ab0000040f000000020200002900000001022000390000000303000029000007910000013d0ad708e90000040f0000000007000414000000040100002900000000010104330000000102000029000000000272004b0000088e0000a13d0000000a0600002900000000020604330000000003000019000000000423004b0000083f0000813d00000000041300190000002003300039000000000563001900000000050504330000000000540435000008370000013d00000000031200190000000000030435000a00000007001d0ad703540000040f00000001020000290000000a0300002900000000023200490000000403000029000000000303043300000000002304350000000005010019000002b7010000410000000002000414000002b70420009c0000000002018019000002b70430009c00000000010340190000004001100210000000c002200210000000000112019f0000030a011001c70000800d0200003900000002030000390000030b040000410ad70ac90000040f00000001012001900000089b0000613d0000000a00000005000000000001042d000002da010000410000000000100435000000110100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f000002da010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f000000030200036700000001040000310000001f0340018f000000040100002900000000010104330000000504400270000000000540004c0000087d0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000008750000413d000000000530004c0000088c0000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310ad703a50000040f000000440210003900000309030000410000000000320435000000240210003900000012030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f000000000100001900000000020000190ad703a50000040f00010000000000020000000c01000039000100000001001d0ad70ad50000040f000000000110004c000008a80000613d000000010100002900000000001004350000000100000005000000000001042d000002da01000041000000000010043500000032010000390000000402000039000000000012043500000024020
0003900000000010000190ad703a50000040f0002000000000002000200000001001d0000000c01000039000100000001001d0ad70ad50000040f0000000202000029000000000121004b000008be0000a13d000000010100002900000000001004350000030c0120004100000000020000190000000200000005000000000001042d000002da010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f0003000000000002000200000003001d000100000002001d000300000001001d0ad70ad50000040f00000001020000290000000302200210000000010300008a00000000042301cf000000ff0520008c0000000004002019000000020500002900000000022501cf0000000002002019000000000242016f000000000334013f000000000131016f000000000121019f00000003020000290ad70ad30000040f0000000300000005000000000001042d0002000000000002000100000001001d0000000d01000039000200000001001d0ad70ad50000040f0000000102000029000002ea022001970000030d01100197000000000121019f00000002020000290ad70ad30000040f0000000200000005000000000001042d00080000000000020000000c010000390ad70ad50000040f000700000001001d000000000110004c000009940000613d0ad7089e0000040f0000030c01000041000800000001001d0ad70ad50000040f0000000701000029000000010110008a0ad708b00000040f0ad70ad50000040f000700000001001d0ad7089e0000040f00000008010000290ad70ad50000040f000000070100002900000008020000290ad70ad30000040f0000000c01000039000200000001001d0ad70ad50000040f000000000210004c000009a30000613d00000002020000290000000000200435000800000001001d0000030e0210004100000000010000190ad70ad30000040f0000000801000029000000010110008a00000002020000290ad70ad30000040f000000010100008a000100000001001d0000000003000019000700000003001d000002cf010000410000000102000029000000000223004b00000000020000190000000002012019000002cf03300197000002cf0430009c0000000001008019000002cf03300167000002cf0330009c000000000102c019000000000110004c0000098c0000613d00000002010000290ad70ad50000040f0000000704000029000000010240021000000001032001bf000800000003001d000000000313004b0000097b0000813d0000000202200039000000000112004b000009430000813d0000000801000029000600000002001d0ad708b00000040f000500000002001d0ad70ad50000040f000400000001001d00000006010000290ad708b00000040f000300000002001d0ad70ad50000040f000000050200002900000003022002100000000403000029000000000323022f000000ff0220008c0000000002030019000000000200201900000003030000290000000303300210000000ff0430008c0000000604000029000009430000213d000000000131022f000000000121004b000009430000a13d000800000004001d00000008010000290ad708b00000040f000600000002001d0ad70ad50000040f000400000001001d00000007010000290ad708b00000040f000500000002001d0ad70ad50000040f000000070400002900000005020000290000000302200210000000000121022f000000ff0220008c000000000100201900000006020000290000000302200210000000ff0320008c0000097b0000213d0000000403000029000000000223022f000000000112004b0000097b0000a13d00000000010400190ad708b00000040f000600000002001d0ad70ad50000040f000500000001001d0000000801000029000800000001001d0ad708b00000040f000400000002001d0ad70ad50000040f000300000001001d00000007010000290ad708b00000040f000000040300002900000003033002100000000304000029000000000434022f000000ff0330008c000000000304001900000000030020190ad708c60000040f00000008010000290ad708b00000040f000000060300002900000003033002100000000504000029000000000434022f000000ff0330008c000000000304001900000000030020190ad708c60000040f0000000803000029000009100000013d0000000c010000390ad70ad50000040f0000000102000029000000000221004b0000098c0000613d000000010210003900000000011201700000098a0000c13d0000000d010000390ad70ad50000040f000002ea01100197000000010110008a000002ea0210009c0000098c0000213d0ad708dc0000040f000000080000000500000000000104
2d000002da010000410000000000100435000000110100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f0000004001000039000000000101043300000044021000390000030f030000410000000000320435000000240210003900000001030000390000000000320435000002e502000041000000000021043500000004021000390000002003000039000000000032043500000064020000390ad703a50000040f000002da010000410000000000100435000000310100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f00020000000000020000004002000039000200000002001d00000000030204330000031002000041000000000023043500000004023000390000002004000039000000000042043500000000020104330000002404300039000000000024043500000044073000390000000004000019000000000524004b000009c10000813d00000000057400190000002004400039000000000614001900000000060604330000000000650435000009b90000013d000000000172001900000000000104350000001f01200039000000200200008a000000000121016f00000044041000390000000001000414000080080200003900000020060000390000000005030019000100000003001d0ad702ea0000040f0000000104000031000000000110004c000009e40000613d000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000001010000290000000001120019000000000221004b00000000020000190000000102004039000002ea0310009c00000a050000213d000000010220019000000a050000c13d00000002020000290000000000120435000000200140008c00000a0d0000413d0000000200000005000000000001042d00000003030003670000001f0240018f000000020100002900000000010104330000000504400270000000000540004c000009f40000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000009ec0000413d000000000520004c00000a030000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310ad703a50000040f000002da010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f000000000100001900000000020000190ad703a50000040f000300000000000200000040050000390000000001050433000002d90210009c00000a6e0000813d00000060021000390000000000250435000003110200004100000000002104350000000002050433000003120320009c00000a6e0000213d0000008003200039000000000035043500000060032000390000031304000041000000000043043500000040032000390000031404000041000000000043043500000020032000390000031504000041000000000043043500000041060000390000000000620435000000400310003900000316040000410000000000430435000000200310003900000000002304350000000007050433000003170270009c00000a6e0000213d00000060027000390000000000250435000003180200004100000000002704350000000002050433000003120320009c00000a6e0000213d0000008003200039000000000035043500000060032000390000031304000041000000000043043500000040032000390000031904000041000000000043043500000020032000390000031a040000410000000000430435000000000062043500000040037000390000031b04000041000000000043043500000020037000390000000000230435000300000005001d000200000006001d000100000007001d0ad70a760000040f00000001010000290ad70a760000040f000000020500002900000003040000290000000001040433000003170210009c00000a6e0000213d000000600210003900000000002404350000031c0200004100000000002104350000000002040433000003120320009c00000a6e0000213d0000008003200039000000000034043500000060032000390000031d04000041000000000043043500000040032000390000031e04000041000000000043043500000020032000390000031f04000041000000000043043500000000005204350000002003100039000000000023043500000040021000390
0000000000204350ad70a760000040f0000000300000005000000000001042d000002da010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190ad703a50000040f0002000000000002000000200210003900000000030204330000000002030433000000410220008c00000aa40000c13d00000041023000390000000002020433000000ff0220018f0000001b0420008a000000010440008c00000aa40000213d00000040041000390000000004040433000200000004001d000000000101043300000020043000390000000004040433000000400330003900000000050304330000004003000039000100000003001d000000000303043300000060063000390000000000560435000000400530003900000000004504350000002004300039000000000024043500000000001304350000000000000435000000000100041400000001020000390000008004000039000000200600003900000000050000190ad7031f0000040f000000000110004c00000aa70000613d00000000010004330000000202000029000000000121013f000002d00110019800000aa40000c13d0000000200000005000000000001042d000000000100001900000000020000190ad703a50000040f000000030200036700000001040000310000001f0340018f000000010100002900000000010104330000000504400270000000000540004c00000ab80000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b00000ab00000413d000000000530004c00000ac70000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310ad703a50000040f00000acc002104210000000102000039000000000001042d0000000002000019000000000001042d00000ad1002104230000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d00000ad70000043200000ad80001042e00000ad9000104300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006603c2f10000000000000000000000000000000000000000000000000000000002d05d3f000000000000000000000000000000000000000000000000000000000738693d0000000000000000000000000000000000000000000000000000000019d8ac610000000000000000000000000000000000000000000000000000000022844fbc000000000000000000000000000000000000000000000000000000002abbd748000000000000000000000000000000000000000000000000000000003b29037c000000000000000000000000000000000000000000000000000000004840a05100000000000000000000000000000000000000000000000000000000579ae3ec0000000000000000000000000000000000000000000000000000000059308f0f000000000000000000000000000000000000000000000000000000005efe4bb4000000000000000000000000000000000000000000000000000000007737dde7000000000000000000000000000000000000000000000000000000007b494b6e000000000000000000000000000000000000000000000000000000007fd5946100000000000000000000000000000000000000000000000000000000874e8f8f00000000000000000000000000000000000000000000000000000000993a04b700000000000000000000000000000000000000000000000000000000ab25690f00000000000000000000000000000000000000000000000000000000aea34ae800000000000000000000000000000000000000000000000000000000af640d0f00000000000000000000000000000000000000000000000000000000be8b112000000000000000000000000000000000000000000000000000000000c6f9688400000000000000000000000000000000000000000000000000000000fb38aa568000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff54657374206d65737361676520320000000000000000000000000000000000003ea9
8af6e35141fbcacc1724e14f5d76b9b58e41f6c35d0e8ae2e204e66695eb993a04b70000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000449bf97d987c61f24f2a6bbac8f2e426eab123cdfadfc01364acfff36c658d902000002000000000000000000000000000000000000000000000000000000009c4d535bdea7cd8a978f128b93471df48c7dbab89d703809115bdc118c235bfd0200000000000000000000000000000000000084000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffa04e487b71000000000000000000000000000000000000000000000000000000006865617020746573742073686f756c64206661696c65640000000000000000006661696c6564000000000000000000000000000000000000000000000000000073656e64696e67206c31206d6573736167657320746573742073686f756c642072657475726e206d656d6f727920746573742073686f756c64206661696c656465640000000000000000000000000000000000000000000000000000000000006163636573732063616c6c6461746120746573742073686f756c64206661696c616363657373206d656d6f727920746573742073686f756c64206661696c6564686f756c64206661696c656400000000000000000000000000000000000000007261772063616c6c2074657374207769746820626967206f7574207075742073696e2064656c65676174652063616c6c0000000000000000000000000000000008c379a0000000000000000000000000000000000000000000000000000000001806aa1896bbf26568e884a7374b41e002500962caba6a15023a8d90e8508b836603c2f100000000000000000000000000000000000000000000000000000000486561702073686f756c64206e6f74206265206d6f6469666965640000000000ab25690f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeaea34ae8000000000000000000000000000000000000000000000000000000007b494b6e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000738693d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffff00000000000000000000000000000000000000000000000000000000800000003b29037c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff7b01000035cd4a1b3f84331419050d6ae2d93c44ec0563f2f82bd890769ed5abec2d49c67975aadd2d389580b368cfff5b49965b0bd5da33c144922ce01e7a4d7b4661696c656420746f206465706c6f7920636f6e7472616374000000000000006661696c6564207472616e736665722063616c6c0000000000000000000000000000000000000000000000000000000000000000000000010000000000000000486561702073686f756c64206e6f7420626520656d7074790000000000000000938b5f3299a1f3b18e458564efbb950733226014eece26fae19012d850b48d8342cbb15ccdc3cad6266b0e7a08c0454b23bf29dc2df74b6f3c209e9336465bd119cae4629a2dd7890036d0d1f6a82742845b778b7184e38d5bebfd4cce3b181ea6ae0aac158b2d5c9a9c9285743419d62a32f6727a640955e4ce8ee41503c784ffffffffffffffffffffffff00000000000000000000000000000000000000007877a797fe6dca4321f33fd95414da079ab78e698d761514c01ced9211af267efe173b97ed9aa263236c52fa3eb334d07741add95e972d17352d76816b4aaea39a8a0592ac89c5ad3bc6df8224c17b485976f597df104ee20d0df415241f670b796b89b91644bc98cd93958e4c9038275d622183e25ac5af08cc6b5d95539132ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000ffffffffffffff6074686520696e6475737472792773207374616e646172642e2e2e00000000000020696e6475737472792e204c6f72656d20497073756d20686173206265656e20206f6620746865207072696e74696e6720616e64207479706573657474696e674c6f72656d20497073756d2069732073696d706c792064756d6d792074657874536f6d65206572726f72206d657373616765000
0000000000000000000000000020000000000000000000000000000000000002000000000000000000000000085bd2d2aa0e5528cca3248dfb1e992d0113a553802d7924fdf049ae9ed1d5b30df6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c7ffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000df6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c6770000000000000000000000000000000000000000000000000000000000000062f84b240000000000000000000000000000000000000000000000000000000014431339128bd25f2c7f93baa611e367472048757f4ad67f6d71a5ca0da550f5000000000000000000000000000000000000000000000000ffffffffffffff7f1c0000000000000000000000000000000000000000000000000000000000000046eabf35680328e26ef4579caf8aeb2cf9ece05dbf67a4f3d1f28c7b1d0e354651e4dbbbcebade695a3f0fdf10beb8b5f83fda161e1a3105a14c41168bf3dce00000000000000000000000007f8b3b04bf34618f4a1723fba96b5db211279a2b000000000000000000000000000000000000000000000000ffffffffffffff9fe0682fd4a26032afff3b18053a0c33d2a6c465c0e19cb1e4c10eb0a949f2827c0bdb5f0ac79d1a7efdc255f399a045038c1b433e9d06c1b1abd58a5fcaab33f1c46cdc50a66f4d07c6e9a127a7277e882fb21bcfb5b068f2b58c7f7283993b790000000000000000000000000865a77d4d68c7e3cdd219d431cfee9271905074dd69e9950f52dddcbc6751fdbb6949787cc1b84ac4020ab0617ec8ad950e554a1b000000000000000000000000000000000000000000000000000000000000004068f5b5e6c4b442e83fcb7b6290520ebb5e077cd10d3bd86cf431ca4b640162b00986d8bb52ee7acb06cabfa6c2c099d8904c7c8d56707a267ddbafd7aed0700000000000000000000000000000000000000000000000000000000000000000", + "default_account_code": "0x0004000000000002000700000000000200000000030100190000006003300270000003540430019700030000004103550002000000010355000003540030019d000100000000001f00000080010000390000004007000039000000000017043500000001012001900000005b0000c13d0000000001000031000000040210008c000000660000413d0000000202000367000000000302043b000000e003300270000003560430009c000000000a00041100000000090004120000000008000410000000740000613d000003570430009c0000008c0000613d000003580430009c000000b30000613d000003590430009c000000c50000613d0000035a0330009c0000006c0000c13d000000040310008a0000035b04000041000000200530008c000000000500001900000000050440190000035b03300197000000000630004c000000000400a0190000035b0330009c00000000030500190000000003046019000000000330004c000000cb0000c13d0000000402200370000000000602043b0000035c0260009c000000cb0000213d000000040560003900000000015100490000035b02000041000002600310008c000000000300001900000000030240190000035b01100197000000000410004c000000000200a0190000035b0110009c00000000010300190000000001026019000000000110004c000000cb0000c13d0000800101a0008c000000680000c13d0000035d09900197000000000189004b000000680000c13d0000000001000414000000000207043300000020032000390000035e04000041000000000043043500000104036000390000000203300367000000000303043b00000024042000390000000000340435000000240300003900000000003204350000035f0320009c000000f60000a13d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f0000000001000416000000000110004c000000cb0000c13d00000020020000390000010001000039000000000021043900000120020000390000000000020439000000400200003900000355030000410d4902960000040f000000000110004c0000006c0000c13d0000000001000019000000000200001900000000030000190d4902960000040f0000000001000416000000000110004c000000cb0000c13d0d4905bf0000040f0000000001000019000000000200001900000000030000190d4902960000040f000500000009001d000600000008001d000400000007001d00070000000a001d0d4902a90000040f0000000701000029000080010110008c000000680000c13d00000005010000290000035d011001970000000604
000029000000000141004b000000680000c13d000000000102001900000000020300190d49042e0000040f00000004020000290000000003020433000003640110019700000000001304350000002002000039000000000103001900000000030000190d4902960000040f000500000009001d000600000008001d000400000007001d00070000000a001d0d4902a90000040f0000000701000029000080010110008c000000680000c13d00000005010000290000035d021001970000000601000029000000000112004b000000680000c13d000600000002001d00000220023000390000000001030019000700000002001d000300000003001d0d4903300000040f000000030120008c000000c60000213d0000000401000029000000000101043300000064021000390000036f03000041000000000032043500000044021000390000037003000041000000000032043500000024021000390000003a0300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000084020000390d4902a00000040f000500000009001d000600000008001d00070000000a001d0d4902a90000040f0000000701000029000080010110008c000000680000c13d00000005010000290000035d011001970000000602000029000000000121004b000000680000c13d00000000010300190d49049f0000040f0000000001000019000000000200001900000000030000190d4902960000040f0d4902d10000040f000000030100002900000007020000290d4903300000040f000000040220008c000000ce0000813d000000000100001900000000020000190d4902a00000040f0000000201100367000000000101043b0000036401100197000003650210009c000000e90000c13d000000030100002900000007020000290d4903300000040f000000430120008c000001390000213d0000000404000029000000000104043300000064021000390000036d03000041000000000032043500000044021000390000036e030000410000000000320435000000240210003900000000004204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000084020000390d4902a00000040f000003660110009c000000680000613d00000004010000290000000002010433000700000002001d0000036701000041000000000012043500000004012000390d490c390000040f0000000703000029000000000231004900000000010300190d4902a00000040f000500000009001d000000600320003900000000003704350000035401100197000700000005001d000600000006001d0d4905cb0000040f00000007010000290d49061b0000040f000400000001001d000000060100002900000044011000390000000201100367000000000101043b000080060110008c0000010e0000c13d0000000601000029000001c40210003900000007010000290d4903300000040f000000030120008c000000000100001900000001010020390d49035d0000040f00000007010000290d490c430000040f0000036002000041000000000020043900000004030000390000000502000029000300000003001d0000000000230439000500000001001d0000800a0100003900000024020000390d4902840000040f0000000502000029000000000112004b0000000001000019000000010100a0390d49036e0000040f0000000601000029000001e402100039000600000002001d00000007010000290d4903300000040f00000000030000310d4903830000040f000000070100002900000006020000290d4903300000040f000000000120004c0000018d0000c13d000000060100002900000007010000290d4903c40000040f0000000002010433000000410220008c000001860000213d00000363010000410000000000100435000000320100003900000003020000290000000000120435000000240200003900000000010000190d4902a00000040f000000030100002900000007020000290d4903300000040f000000440220008c000000cb0000413d00000004031000390000000202000367000000000332034f000000000603043b0000035d0360009c000000cb0000213d0000000303000029000000e003300039000000000332034f0000002401100039000000000112034f000000000101043b000500000001001d000000000203043b000000040500002900000000030504330000036801000041000000000013043500000004073000390000000001000414000000060400002900000000004704350000035d042001970000002402300039000300000004001d0000000000420435000000040260008c0000019b0000613d000000440400003900000000020600190000000005030
019000700000006001d000200000003001d0d49024a0000040f000000020300002900000007060000290000000405000029000000000110004c0000019b0000c13d000000030200036700000001040000310000001f0340018f00000000010504330000000504400270000000000540004c000001750000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b0000016d0000413d000000000530004c000001840000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310d4902a00000040f00000061011000390000000002010433000003610220019700000362022001c7000000000021043500000007010000290000000601000029000000070100002900000006020000290d4903300000040f00000000030000310d4903830000040f000000000201001900000004010000290d4905410000040f00000007010000290d49049f0000040f0000000001000019000000000200001900000000030000190d4902960000040f0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600420018f00000000070300190000000002340019000000000342004b000000000300001900000001030040390000035c0420009c000000530000213d0000000103300190000000530000c13d0000000000250435000000200110008c000000cb0000413d000700000006001d00000000010704330000000503000029000000000131004b000000680000813d0000002001200039000003690300004100000000003104350000002401200039000000030300002900000000003104350000004401000039000100000001001d0000000000120435000000440120003900000000000104350000036a0120009c00000004030000290000000701000029000000530000213d000000800420003900000000004304350d490c7d0000040f000000070200002900000004050000290000000006050433000003680100004100000000001604350000002403600039000000000100041400000003040000290000000000430435000200000006001d000000040460003900000006030000290000000000340435000000040320008c000001da0000613d0000004404000039000000020300002900000000050300190d49024a0000040f0000000405000029000000000110004c000001650000613d0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600220018f00000002030000290000000003320019000000000223004b000000000200001900000001020040390000035c0430009c000000530000213d0000000102200190000000530000c13d0000000000350435000000200110008c000000cb0000413d00000002010000290000000001010433000000000110004c000002040000c13d000000200130003900000369020000410000000000210435000000440130003900000005020000290000000000210435000000240130003900000003020000290000000000210435000000010100002900000000001304350000036a0130009c00000004020000290000000701000029000000530000213d0000008004300039000000000042043500000000020300190d490c7d0000040f000000680000013d00000064013000390000036b02000041000000000021043500000044013000390000036c02000041000000000021043500000024013000390000003602000039000000000021043500000367010000410000000000130435000000040130003900000020020000390000000000210435000000840200003900000000010300190d4902a00000040f0002000000000002000200000006001d000100000005001d0000035405000041000003540630009c00000000030580190000004003300210000003540640009c00000000040580190000006004400210000000000334019f000003540410009c0000000001058019000000c001100210000000000113019f0d490d3f0000040f000000010800002900000002040000290000001f0340018f0000000504400270000000000540004c000002340000613d000000000500001900000005065002100000000007680019000000000661034f000000000606043b00000000006704350000000105500039000000000645004b0000022c0000413d000000010220018f000000000530004c000002440000613d0000000504400210000000000541034f00000000044800190000000303300210
000000000604043300000000063601cf000000000636022f000000000505043b0000010003300089000000000535022f00000000033501cf000000000363019f000000000034043500030000000103550000006001100270000103540010019d00000000010200190000000200000005000000000001042d0001000000000002000100000005001d0000035405000041000003540630009c00000000030580190000004003300210000003540640009c00000000040580190000006004400210000000000334019f000003540410009c0000000001058019000000c001100210000000000113019f0d490d440000040f0000000106000029000000010220018f000000000300001900000005043002100000000005460019000000000441034f000000000404043b00000000004504350000000103300039000000000430004c0000000004000019000000010400603900000001044001900000025c0000c13d00030000000103550000006001100270000103540010019d00000000010200190000000100000005000000000001042d0000035403000041000003540410009c00000000010380190000004001100210000003540420009c00000000020380190000006002200210000000000112019f0000000002000414000003540420009c0000000002038019000000c002200210000000000112019f00000371011001c700008010020000390d490d440000040f0000000102200190000002810000613d000000000101043b000000000001042d000000000100001900000000020000190d4902a00000040f000000000301001900000354010000410000000004000414000003540540009c0000000001044019000000c00110021000000060022002100000000001120019000003720110004100000000020300190d490d440000040f0000000102200190000002930000613d000000000101043b000000000001042d000000000100001900000000020000190d4902a00000040f0000035404000041000003540510009c000000000104801900000040011002100000000001310019000003540320009c00000000020480190000006002200210000000000121001900000d4a0001042e0000035403000041000003540420009c0000000002038019000003540410009c000000000103801900000040011002100000006002200210000000000112019f00000d4b00010430000000040210008a0000035b030000410000005f0420008c000000000400001900000000040320190000035b02200197000000000520004c00000000030080190000035b0220009c00000000020400190000000002036019000000000220004c000002ce0000613d00000002020003670000004403200370000000000303043b0000035c0430009c000002ce0000213d000000040330003900000000013100490000035b04000041000002600510008c000000000500001900000000050440190000035b01100197000000000610004c000000000400a0190000035b0110009c00000000010500190000000001046019000000000110004c000002ce0000c13d0000000401200370000000000101043b0000002402200370000000000202043b000000000001042d000000000100001900000000020000190d4902a00000040f000300000000000200000000010000310d4902a90000040f0000000001000411000080010110008c000003080000c13d00000000010004120000035d011001970000000002000410000000000121004b000003080000c13d000000a00230003900000000010004140000000204000367000000000524034f0000006002300039000000000224034f000000000202043b000000000405043b000000000340004c0000030c0000c13d0000000004000415000000030440008a00000020044000c9000300000000001d000100000004001d000080010200003900000000030000190000000004000019000000000500001900000000060000190d4902150000040f0000000103000029000000200230011a000000000201001f000000000110004c000003080000c13d000000400100003900000000010104330000006402100039000003730300004100000000003204350000004402100039000003740300004100000000003204350000002402100039000000250300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000084020000390d4902a00000040f0000000001000019000000000200001900000000030000190d4902960000040f00000000534200a900000000544300d9000000000224004b000003280000c13d0000000004000415000000020440008a00000020044000c9000200000000001d000000000230004c000002ea0000613d0000035402000041000003540410009c0000000001028019000
000c00110021000000371011001c70000800902000039000080010400003900000000050000190d490d3f0000040f00000000030100190000006003300270000103540030019d0000000003000415000000020330008a00000020033000c90003000000010355000000010120018f000002f20000013d00000363010000410000000000100435000000110100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000000300003100000000041300490000001f0540008a0000000204000367000000000224034f000000000202043b0000035b06000041000000000752004b000000000700001900000000070640190000035b055001970000035b08200197000000000958004b000000000600a019000000000558013f0000035b0550009c00000000050700190000000005066019000000000550004c0000035a0000613d0000000001120019000000000214034f000000000202043b0000035c0420009c0000035a0000213d000000000323004900000020011000390000035b04000041000000000531004b000000000500001900000000050420190000035b033001970000035b06100197000000000736004b0000000004008019000000000336013f0000035b0330009c00000000030500190000000003046019000000000330004c0000035a0000c13d000000000001042d000000000100001900000000020000190d4902a00000040f000000000110004c000003600000613d000000000001042d00000040010000390000000001010433000000440210003900000375030000410000000000320435000003670200004100000000002104350000002402100039000000200300003900000000003204350000000402100039000000000032043500000064020000390d4902a00000040f000000000110004c000003710000613d000000000001042d000000400100003900000000010104330000006402100039000003760300004100000000003204350000004402100039000003770300004100000000003204350000002402100039000000220300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000084020000390d4902a00000040f0000000004010019000003780120009c000003b90000813d0000003f01200039000000200500008a000000000651016f000000400500003900000000010504330000000006610019000000000716004b000000000700001900000001070040390000035c0860009c000003b90000213d0000000107700190000003b90000c13d000000000065043500000000002104350000000005420019000000000335004b000003c10000213d0000001f0520018f000000020440036700000020031000390000000506200270000000000760004c000003a70000613d000000000700001900000005087002100000000009830019000000000884034f000000000808043b00000000008904350000000107700039000000000867004b0000039f0000413d000000000750004c000003b60000613d0000000506600210000000000464034f00000000066300190000000305500210000000000706043300000000075701cf000000000757022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000474019f000000000046043500000000022300190000000000020435000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000000100001900000000020000190d4902a00000040f00000040020000390000000001020433000003790310009c000003d90000813d0000008003100039000000000032043500000041020000390000000000210435000000200210003900000000030000310000000203300367000000000400001900000005054002100000000006520019000000000553034f000000000505043b00000000005604350000000104400039000000030540008c000003d00000413d000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000400200003900000000010204330000037a0310009c000003f90000813d0000004003100039000000000032043500000001020000390000000000210435000000200210003900000000030000310000000203300367000000000400001900000005054002100000000006520019000000000553034f000000000505043b00000000005604350000000104400039000000000540004c000000000500001900000001050060390000000105500190000003
ed0000c13d000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f0000000002010019000003780120009c000004260000813d0000003f01200039000000200300008a000000000431016f000000400300003900000000010304330000000004410019000000000514004b000000000500001900000001050040390000035c0640009c000004260000213d0000000105500190000004260000c13d000000000043043500000000002104350000001f022000390000000502200270000000000320004c000004230000613d000000200310003900000000040000310000000204400367000000000500001900000005065002100000000007630019000000000664034f000000000606043b00000000006704350000000105500039000000000625004b0000041b0000413d000000000200004c000004250000613d000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f00040000000000020000000006020019000300000001001d00000000010004140000004003000039000000000203043300000020042000390000035e050000410000000000540435000400000006001d00000100046000390000000204400367000000000404043b00000024052000390000000000450435000000240400003900000000004204350000037b0420009c0000048f0000813d0000006004200039000000000043043500000354011001970d4905cb0000040f0000000302000029000000000120004c00000000010200190000044b0000c13d00000004010000290d49061b0000040f000300000001001d000000040100002900000040021000390000000202200367000000000202043b000080060220008c000004590000c13d000001c0021000390d4903300000040f000000030120008c000000000100001900000001010020390d49035d0000040f00000004010000290d490c430000040f0000036002000041000000000020043900000000020004100000000403000039000100000003001d0000000000230439000200000001001d0000800a0100003900000024020000390d4902840000040f0000000202000029000000000112004b0000000001000019000000010100a0390d49036e0000040f0000000401000029000001e002100039000200000002001d0d4903300000040f00000000030000310d4903830000040f000000040100002900000002020000290d4903300000040f000000000120004c0000000401000029000004820000c13d000000020100002900000001010000290d4903c40000040f0000000002010433000000410220008c000004970000a13d00000061011000390000000002010433000003610220019700000362022001c700000000002104350000000401000029000000020200002900000002020000290d4903300000040f00000000030000310d4903830000040f000000000201001900000003010000290d4905410000040f0000037c02000041000000000110004c000000000102001900000000010060190000000400000005000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f00000363010000410000000000100435000000320100003900000001020000290000000000120435000000240200003900000000010000190d4902a00000040f0004000000000002000200000001001d00000120031000390000000202000367000000000432034f0000004003100039000000000232034f000000000202043b000400000002001d000000000104043b000300000001001d0000037d0110009c000000000100001900000001010040390d490c6b0000040f0000000201000029000001c0021000390d4903300000040f00000000030000310d4903830000040f00000003020000290000037e062001970000000002010019000000000700041400000004010000290000035d04100197000080060140008c000004e00000c13d0000037f0170009c00000000010000190000000101004039000300000006001d000100000007001d000400000002001d0d490c6b0000040f00000004010000290000000001010433000200000001001d0000037f0110009c000000000100001900000001010040390d490c6b0000040f00000003030000290000000101000029000000c00110021000000380011001970000000402000029000000400220021000000381022000410000038202200197000000000112019f000000020200002900000060022002100000038302200197000000000
121019f00000384011001c7000000000230004c000004ff0000613d00008009020000390000800604000039000000010500003900000000060000190d490d3f0000040f000300000002001d000005060000013d000000040140008c0000053c0000613d00000000050204330000002003200039000000000160004c000005340000613d0000035401000041000003540230009c000000000201001900000000020340190000004002200210000003540350009c000000000301001900000000030540190000006003300210000000000223019f000003540370009c0000000001074019000000c001100210000000000112019f00000371011001c70000800902000039000000000306001900000000050000190d490d3f0000040f00000000030100190000006003300270000103540030019d0003000000010355000000010120018f0000053a0000013d000080060200003900000000030000190000000004000019000000000500001900000000060000190d490d3f0000040f000300000002001d00030000000103550000006001100270000103540010019d0000035401100197000400000001001d0d4904010000040f00000004050000290000000102000031000000000225004b0000053e0000213d000000200310003900000003040003670000001f0250018f0000000505500270000000000650004c0000051f0000613d000000000600001900000005076002100000000008730019000000000774034f000000000707043b00000000007804350000000106600039000000000756004b000005170000413d000000000620004c0000052e0000613d0000000505500210000000000454034f00000000055300190000000302200210000000000605043300000000062601cf000000000626022f000000000404043b0000010002200089000000000424022f00000000022401cf000000000262019f0000000000250435000000030200002900000001022001900000053c0000c13d000000000201043300000000010300190d4902a00000040f000000000107001900000000020400190000000004050019000000000500001900000000060000190d4902150000040f000000000110004c0000053e0000613d0000000400000005000000000001042d000000000100001900000000020000190d4902a00000040f00010000000000020000000003020433000000410330008c000005710000c13d00000041032000390000000003030433000000ff0430018f0000001d0340008a000000030500008a000000000353004b000005800000a13d00000040032000390000000005030433000000200220003900000000020204330000004003000039000100000003001d0000000003030433000003860650009c0000058f0000813d0000006006300039000000000056043500000040053000390000000000250435000000200230003900000000004204350000000000130435000000000000043500000000010004140000000102000039000000800400003900000000050000190d49024a0000040f000000000110004c0000059d0000613d00000000010004330000035d011001970000000002000410000000000221004b00000000020000190000000102006039000000000110004c0000000001000019000000010100c039000000000112016f000000010110018f0000000100000005000000000001042d0000004001000039000000000101043300000044021000390000038503000041000000000032043500000024021000390000001d0300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000064020000390d4902a00000040f000000400100003900000000010104330000004402100039000003880300004100000000003204350000002402100039000000160300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000064020000390d4902a00000040f00000044013000390000038702000041000000000021043500000024013000390000000902000039000000000021043500000367010000410000000000130435000000040130003900000020020000390000000000210435000000640200003900000000010300190d4902a00000040f000000030200036700000001040000310000001f0340018f000000010100002900000000010104330000000504400270000000000540004c000005ae0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000005a60000413d000000000530004c000005bd0000613d0000000504400210000000000242034f0000000004410019000000030330
0210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310d4902a00000040f0000000001000411000080010110008c000005c30000613d000000000001042d00000363010000410000000000100435000000010100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f0003000000000002000200000002001d000100000001001d0000000001020433000300000001001d0000037f0110009c000000000100001900000001010040390d490c6b0000040f0000000101000029000000c00110021000000380011001970000000202000029000000400220021000000381022000410000038202200197000000000121019f000000030200002900000060022002100000038302200197000000000121019f00000384011001c7000080030200003900000000030000190000000004000019000000000500001900000000060000190d490d3f0000040f000200000002001d00030000000103550000006001100270000103540010019d0000035401100197000300000001001d0d4904010000040f00000003050000290000000102000031000000000225004b000006150000213d000000200310003900000003040003670000001f0250018f0000000505500270000000000650004c000006010000613d000000000600001900000005076002100000000008730019000000000774034f000000000707043b00000000007804350000000106600039000000000756004b000005f90000413d000000000620004c000006100000613d0000000505500210000000000454034f00000000055300190000000302200210000000000605043300000000062601cf000000000626022f000000000404043b0000010002200089000000000424022f00000000022401cf000000000262019f000000000025043500000002020000290000000102200190000006180000613d0000000300000005000000000001042d000000000100001900000000020000190d4902a00000040f000000000201043300000000010300190d4902a00000040f000f0000000000020000000207000367000000000217034f000000000302043b000000000230004c0000010004100039000000a00610003900000060051000390000004002100039000a00000002001d0000012002100039000b00000002001d000001c002100039000f00000001001d000e00000002001d000d00000005001d000c00000006001d0000070b0000613d000900000004001d000000710430008c000000c004100039000007780000c13d000800000004001d0d4903300000040f00000000030000310d4903830000040f000000000201043300000020011000390d49026d0000040f0000000f020000290000020003200039000000000400003100000000022400490000001f0520008a0000000202000367000000000332034f000000000303043b0000035b06000041000000000753004b000000000700001900000000070640190000035b055001970000035b08300197000000000958004b000000000600a019000000000558013f0000035b0550009c00000000050700190000000005066019000000000a010019000000000150004c00000b200000613d0000000f010000290000000001130019000000000312034f000000000503043b0000035c0350009c00000b200000213d0000000503500210000000000434004900000020061000390000035b01000041000000000746004b000000000700001900000000070120190000035b044001970000035b08600197000000000948004b0000000001008019000000000448013f0000035b0440009c000000000107c019000000000110004c00000b200000c13d0000004001000039000e00000001001d000000000401043300000020014000390000038c05500197000000000750004c000006760000613d000000000262034f000000000600001900000005076002100000000008710019000000000772034f000000000707043b00000000007804350000000106600039000000000756004b0000066e0000413d000000000200004c000006780000613d00070000000a001d00000000003404350000003f02300039000000200300008a000000000232016f0000000002240019000000000342004b000000000300001900000001030040390000035c0520009c00000b180000213d000000010330019000000b180000c13d0000000e03000029000000000023043500000000020404330d49026d0000040f000600000001001d0000000f0100002900000220021000390d4903300000040f00000000030000310d4903830000040f000000000201043300000020011000390d49026d0000040
f00000002020003670000000d03000029000000000632034f0000000a03000029000000000532034f0000000f0c0000290000002003c00039000000000432034f0000000b03000029000000000732034f0000000903000029000000000832034f000000e003c00039000000000932034f0000000803000029000000000a32034f0000000c03000029000000000b32034f0000008003c00039000000000d0c0019000000000c32034f0000000002d2034f000000000302043b000000000404043b000000000505043b000000000606043b000000000c0c043b000000000b0b043b000000000a0a043b000000000909043b000000000808043b000000000707043b0000000e020000290000000002020433000001c00d20003900000000001d0435000001a001200039000000060d0000290000000000d104350000018001200039000000070d0000290000000000d1043500000160012000390000000000710435000001400120003900000000008104350000012001200039000000000091043500000100012000390000000000a10435000000e0012000390000000000b10435000000c0012000390000000000c10435000000a0012000390000000000610435000000800120003900000000005104350000006001200039000000000041043500000040012000390000000000310435000001c00100003900000000001204350000038d03000041000000200120003900000000003104350000038e0320009c00000b180000213d000001e0032000390000000e04000029000000000034043500000000020204330d49026d0000040f0000000e020000290000000002020433000f00000002001d0000038a020000410000000000200439000d00000001001d0000800b0100003900000004020000390d4902840000040f0000038f020000410000000f040000290000006003400039000000000023043500000390020000410000004003400039000000000023043500000000030400190000008002300039000000000012043500000391020000410000002001300039000000000021043500000080020000390000000000230435000003920230009c00000b180000213d000000a0023000390000000e04000029000000000024043500000000020304330d49026d0000040f0000000e02000029000000000402043300000042024000390000000d03000029000000000032043500000393020000410000002003400039000000000023043500000022024000390000000000120435000000420100003900000000001404350000036a0140009c00000b180000213d000000800140003900000b110000013d000000000147034f000000000101043b00090000000703530d490b720000040f000000090200035f0000000c03000029000000000232034f000000000202043b000c00000001001d00000000010200190d490b720000040f0000000d02000029000000090300035f000000000223034f000000000202043b000d00000001001d00000000010200190d490b720000040f0000000d0700002900000000020704330000004009000039000000000809043300000020038000390000000004000019000000000524004b0000072b0000813d00000000053400190000002004400039000000000674001900000000060604330000000000650435000007230000013d0000000003320019000000000003043500000000040104330000000005000019000000000645004b000007370000813d000000000635001900000020055000390000000007150019000000000707043300000000007604350000072f0000013d00000000013400190000000000010435000000000124001900000000001804350000003f01100039000000200200008a000700000002001d000000000221016f000d00000008001d0000000001820019000000000221004b000000000200001900000001020040390000035c0310009c00000b180000213d000000010220019000000b180000c13d000900000009001d00000000001904350000000a010000290000000201100367000000000101043b0000035d011001970d490b5a0000040f0000000b020000290000000202200367000000000202043b000a00000001001d00000000010200190d490b720000040f000b00000001001d0000000f010000290000000e020000290d4903300000040f0000035c01200197000000010210008c000007ba0000c13d0000000f010000290000000e020000290d4903300000040f000000000220004c00000b230000613d0000000205000367000000000115034f000000000101043b000000010200008a0000035b03000041000000000221004b000000000200001900000000020320190000035b011001970000035b0410009c00000000030080190000035b011001670000035b0110009c0000000001020019000000000103601900
00006006000039000000000110004c000007bd0000c13d00080000000503530d490b460000040f000000080500035f0000000006010019000007bd0000013d000000020230008c000008ad0000c13d0000038a0100004100000000001004390000800b010000390000000402000039000800000004001d0d4902840000040f0d490b720000040f000000020300036700070000000303530000000902000029000000000223034f000000000202043b000500000001001d00000000010200190d490b720000040f000000070200035f0000000803000029000000000232034f000000000202043b000600000001001d00000000010200190d490b720000040f000000070200035f0000000c03000029000000000232034f000000000202043b000800000001001d00000000010200190d490b720000040f0000000d02000029000000070300035f000000000223034f000000000202043b000900000001001d00000000010200190d490b720000040f0000000a02000029000000070300035f000000000223034f000000000202043b0000035d02200197000c00000001001d00000000010200190d490b5a0000040f0000000b020000290000000202200367000000000202043b000d00000001001d00000000010200190d490b720000040f000000050700002900000000020704330000004009000039000000000809043300000020038000390000000004000019000000000524004b000008e70000813d00000000053400190000002004400039000000000674001900000000060604330000000000650435000007b20000013d0d490ba70000040f000000020500036700000000060100190000000f010000290000014002100039000000000225034f0000006007000039000000000202043b000000000220004c0000000d020000290000000c030000290000000b040000290000000a050000290000000908000029000600000006001d000007f40000613d0000038a0100004100000000001004390000800b0100003900000004020000390d4902840000040f0d490b720000040f00000009080000290000000007080433000000200370003900000000020104330000000004000019000000000524004b000007dd0000813d00000000053400190000002004400039000000000614001900000000060604330000000000650435000007d50000013d0000000001320019000003940300004100000000003104350000000201200039000000000017043500000041012000390000000702000029000000000221016f0000000001720019000000000221004b000000000200001900000001020040390000035c0310009c00000b180000213d000000010220019000000b180000c13d00000000001804350000000f010000290000000d020000290000000c030000290000000b040000290000000a050000290000000606000029000800000007001d0000000006060433000500000006001d0000000004040433000400000004001d0000000004050433000300000004001d0000000002020433000200000002001d0000000002030433000100000002001d0000000e020000290d4903300000040f00000002010000290000000103000029000000000131001900000003030000290000000001310019000000040300002900000000013100190000000503000029000000000131001900000000012100190000000802000029000000000202043300000000012100190000035c011001970d490be70000040f000500000001001d0000000f010000290000000e020000290d4903300000040f000000050900002900000000050904330000000903000029000000000403043300000020034000390000000006000019000000000756004b000008220000813d000000000736001900000020066000390000000008960019000000000808043300000000008704350000081a0000013d00000000063500190000000000060435000000000545001900000020075000390000000c0c00002900000000060c043300000000080000190000000d0b000029000000000968004b000008320000813d00000000097800190000002008800039000000000ac80019000000000a0a04330000000000a904350000082a0000013d000000000776001900000000000704350000000005560019000000200750003900000000060b043300000000080000190000000a0c000029000000000968004b000008410000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a90435000008390000013d000000000776001900000000000704350000000005560019000000200750003900000000060c043300000000080000190000000b0b000029000000000968004b000008500000813d00000000097800190000002008800039000000000ac80019000000000a0a043300000
00000a90435000008480000013d000000000776001900000000000704350000000005560019000000200750003900000000060b04330000000008000019000000000968004b0000085e0000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a90435000008560000013d0000000007760019000000000007043500000000055600190000002007500039000000060b00002900000000060b04330000000008000019000000000968004b0000086d0000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a90435000008650000013d0000000007760019000000000007043500000000055600190000001f0620018f000000200750003900000002011003670000000508200270000000000980004c0000087f0000613d0000000009000019000000050a900210000000000ba70019000000000aa1034f000000000a0a043b0000000000ab04350000000109900039000000000a89004b000008770000413d000000000960004c0000088e0000613d0000000508800210000000000181034f00000000078700190000000306600210000000000807043300000000086801cf000000000868022f000000000101043b0000010006600089000000000161022f00000000016101cf000000000181019f0000000000170435000000000125001900000020051000390000000000050435000000080900002900000000020904330000000006000019000000000726004b0000089c0000813d00000000075600190000002006600039000000000896001900000000080804330000000000870435000008940000013d000000000552001900000000000504350000000001410049000000000112001900000000001404350000003f011000390000000702000029000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000035c0510009c00000b180000213d000000010220019000000b110000613d00000b180000013d000000010130008c00000b2b0000c13d0000038a0100004100000000001004390000800b0100003900000004020000390d4902840000040f0d490b720000040f000000020300036700080000000303530000000902000029000000000223034f000000000202043b000600000001001d00000000010200190d490b720000040f0000000c02000029000000080300035f000000000223034f000000000202043b000700000001001d00000000010200190d490b720000040f0000000d02000029000000080300035f000000000223034f000000000202043b000900000001001d00000000010200190d490b720000040f0000000a02000029000000080300035f000000000223034f000000000202043b0000035d02200197000c00000001001d00000000010200190d490b5a0000040f0000000b020000290000000202200367000000000202043b000d00000001001d00000000010200190d490b720000040f000000060700002900000000020704330000004009000039000000000809043300000020038000390000000004000019000000000524004b000009710000813d00000000053400190000002004400039000000000674001900000000060604330000000000650435000008df0000013d0000000003320019000000000003043500000000028200190000002004200039000000060e00002900000000030e043300000000050000190000000d0a0000290000000c0b000029000000090c000029000000080d000029000000000635004b000008fa0000813d000000000645001900000020055000390000000007e5001900000000070704330000000000760435000008f20000013d000000000443001900000000000404350000000002230019000000200420003900000000030d04330000000005000019000000000635004b000009080000813d000000000645001900000020055000390000000007d5001900000000070704330000000000760435000009000000013d000000000443001900000000000404350000000002230019000000200420003900000000030c04330000000005000019000000000635004b000009160000813d000000000645001900000020055000390000000007c50019000000000707043300000000007604350000090e0000013d000000000443001900000000000404350000000002230019000000200420003900000000030b04330000000005000019000000000635004b000009240000813d000000000645001900000020055000390000000007b50019000000000707043300000000007604350000091c0000013d000000000443001900000000000404350000000002230019000000200420003900000000030a04330000000005000019000000000635004b000009320000813d00000000
0645001900000020055000390000000007a50019000000000707043300000000007604350000092a0000013d000000000443001900000000000404350000000002230019000000200420003900000000030104330000000005000019000000000635004b000009400000813d00000000064500190000002005500039000000000715001900000000070704330000000000760435000009380000013d000000000143001900000000000104350000000001820049000000000113001900000000001804350000003f01100039000000200200008a000a00000002001d000000000221016f000d00000008001d0000000001820019000000000221004b000000000200001900000001020040390000035c0310009c00000b180000213d000000010220019000000b180000c13d000900000009001d00000000001904350000000f010000290000000e020000290d4903300000040f0000035c01200197000000010210008c000009ec0000c13d0000000f010000290000000e020000290d4903300000040f000000000220004c00000b230000613d0000000201100367000000000101043b000000010200008a0000035b03000041000000000221004b000000000200001900000000020320190000035b011001970000035b0410009c00000000030080190000035b011001670000035b0110009c00000000020360190000006001000039000000000220004c000009ed0000c13d0d490b460000040f000009ed0000013d0000000003320019000000000003043500000000028200190000002004200039000000070d00002900000000030d043300000000050000190000000d0a0000290000000c0b000029000000090c000029000000000635004b000009830000813d000000000645001900000020055000390000000007d50019000000000707043300000000007604350000097b0000013d000000000443001900000000000404350000000002230019000000200420003900000000030c04330000000005000019000000000635004b000009910000813d000000000645001900000020055000390000000007c5001900000000070704330000000000760435000009890000013d000000000443001900000000000404350000000002230019000000200420003900000000030b04330000000005000019000000000635004b0000099f0000813d000000000645001900000020055000390000000007b5001900000000070704330000000000760435000009970000013d000000000443001900000000000404350000000002230019000000200420003900000000030a04330000000005000019000000000635004b000009ad0000813d000000000645001900000020055000390000000007a5001900000000070704330000000000760435000009a50000013d000000000443001900000000000404350000000002230019000000200420003900000000030104330000000005000019000000000635004b000009bb0000813d00000000064500190000002005500039000000000715001900000000070704330000000000760435000009b30000013d000000000143001900000000000104350000000001820049000000000113001900000000001804350000003f01100039000000200200008a000a00000002001d000000000221016f000d00000008001d0000000001820019000000000221004b000000000200001900000001020040390000035c0310009c00000b180000213d000000010220019000000b180000c13d000900000009001d00000000001904350000000f010000290000000e020000290d4903300000040f0000035c01200197000000010210008c00000a7f0000c13d0000000f010000290000000e020000290d4903300000040f000000000220004c00000b230000613d0000000201100367000000000101043b000000010200008a0000035b03000041000000000221004b000000000200001900000000020320190000035b011001970000035b0410009c00000000030080190000035b011001670000035b0110009c00000000020360190000006001000039000000000220004c00000a800000c13d0d490b460000040f00000a800000013d0d490ba70000040f000c00000001001d0d4903e10000040f00000000030100190000000001030433000000000110004c00000b230000613d0000002001300039000000000201043300000361022001970000038b022001c700000000002104350000000c010000290000000001010433000800000001001d0000000d010000290000000001010433000700000001001d0000000f010000290000000e02000029000b00000003001d0d4903300000040f00000008010000290000000703000029000000000131001900000000012100190000000b02000029000000000202043300000000012100190000035c011001970d490be7000
0040f000800000001001d0000000f010000290000000e020000290d4903300000040f000000080a00002900000009030000290000000004030433000003710500004100000020034000390000000000530435000000210640003900000000050a04330000000007000019000000000857004b00000a200000813d000000000867001900000020077000390000000009a700190000000009090433000000000098043500000a180000013d00000000066500190000000000060435000000000545001900000021075000390000000d0b00002900000000060b04330000000008000019000000000968004b00000a2f0000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a9043500000a270000013d00000000077600190000000000070435000000000556001900000021075000390000000c0b00002900000000060b04330000000008000019000000000968004b00000a3e0000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a9043500000a360000013d0000000007760019000000000007043500000000055600190000001f0620018f000000210750003900000002011003670000000508200270000000000980004c00000a500000613d0000000009000019000000050a900210000000000ba70019000000000aa1034f000000000a0a043b0000000000ab04350000000109900039000000000a89004b00000a480000413d000000000960004c00000a5f0000613d0000000508800210000000000181034f00000000078700190000000306600210000000000807043300000000086801cf000000000868022f000000000101043b0000010006600089000000000161022f00000000016101cf000000000181019f00000000001704350000000001250019000000210510003900000000000504350000000b0900002900000000020904330000000006000019000000000726004b00000a6d0000813d0000000007560019000000200660003900000000089600190000000008080433000000000087043500000a650000013d00000000055200190000000000050435000000000141004900000000011200190000000102100039000000000024043500000040011000390000000a02000029000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000035c0510009c00000b180000213d000000010220019000000b110000613d00000b180000013d0d490ba70000040f000c00000001001d0d4903e10000040f00000000030100190000000001030433000000000110004c00000b230000613d0000002001300039000000000201043300000361022001970000038b022001c700000000002104350000000c010000290000000001010433000800000001001d0000000d010000290000000001010433000700000001001d0000000f010000290000000e02000029000b00000003001d0d4903300000040f00000008010000290000000703000029000000000131001900000000012100190000000b02000029000000000202043300000000012100190000035c011001970d490be70000040f000800000001001d0000000f010000290000000e020000290d4903300000040f000000080a00002900000009030000290000000004030433000003840500004100000020034000390000000000530435000000210640003900000000050a04330000000007000019000000000857004b00000ab30000813d000000000867001900000020077000390000000009a700190000000009090433000000000098043500000aab0000013d00000000066500190000000000060435000000000545001900000021075000390000000d0b00002900000000060b04330000000008000019000000000968004b00000ac20000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a9043500000aba0000013d00000000077600190000000000070435000000000556001900000021075000390000000c0b00002900000000060b04330000000008000019000000000968004b00000ad10000813d00000000097800190000002008800039000000000ab80019000000000a0a04330000000000a9043500000ac90000013d0000000007760019000000000007043500000000055600190000001f0620018f000000210750003900000002011003670000000508200270000000000980004c00000ae30000613d0000000009000019000000050a900210000000000ba70019000000000aa1034f000000000a0a043b0000000000ab04350000000109900039000000000a89004b00000adb0000413d000000000960004c00000af20000613d0000000508800210000000000181034f000000000787001900000003066002
10000000000807043300000000086801cf000000000868022f000000000101043b0000010006600089000000000161022f00000000016101cf000000000181019f00000000001704350000000001250019000000210510003900000000000504350000000b0900002900000000020904330000000006000019000000000726004b00000b000000813d0000000007560019000000200660003900000000089600190000000008080433000000000087043500000af80000013d00000000055200190000000000050435000000000141004900000000011200190000000102100039000000000024043500000040011000390000000a02000029000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000035c0510009c00000b180000213d000000010220019000000b180000c13d00000040020000390000000000120435000000000204043300000000010300190d49026d0000040f0000000f00000005000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000000100001900000000020000190d4902a00000040f00000363010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000400100003900000000010104330000004402100039000003890300004100000000003204350000002402100039000000170300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000064020000390d4902a00000040f0000000004000019000000000534004b00000b430000813d0000000005240019000000000614001900000000060604330000000000650435000000200440003900000b3b0000013d00000000012300190000000000010435000000000001042d000000400200003900000000010204330000037a0310009c00000b520000813d0000004003100039000000000032043500000020021000390000039503000041000000000032043500000001020000390000000000210435000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f0000000002010019000000400300003900000000010304330000037a0410009c00000b6a0000813d0000004004100039000000000043043500000020031000390000039604000041000000000043043500000060022002100000002103100039000000000023043500000015020000390000000000210435000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f00020000000000020000007f0210008c000200000001001d00000b8f0000a13d0d490c1d0000040f000100000001001d00000002011000390d4904010000040f0000000002010433000000000220004c00000b9f0000613d0000002002100039000000000302043300000361033001970000000105000029000000f804500210000000000334019f000003950330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c000000000203001900000000020020190000002103100039000000000023043500000b9d0000013d0d4903e10000040f0000000002010433000000000220004c00000b9f0000613d0000000204000029000000f8024002100000035b03000041000000000440004c0000000002036019000000200310003900000000040304330000036104400197000000000224019f00000000002304350000000200000005000000000001042d00000363010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000200000000000200000000030100190000035c01300197000000010210008c00000bd70000613d000000370210008c00000bc80000a13d000200000001001d0d490c1d0000040f000100000001001d00000002011000390d4904010000040f0000000002010433000000000220004c00000bdf0000613d0000002002100039000000000302043300000361033001970000000105000029000000f804500210000000000334019f000003970330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c00000000020300190000000002002019000000210310003900000000002304350
0000bd50000013d000200000003001d0d4903e10000040f0000000002010433000000000220004c00000bdf0000613d0000000202000029000000f802200210000000200310003900000000040304330000036104400197000000000224019f0000035b0220016700000000002304350000000200000005000000000001042d00000363010000410000000000100435000000010100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f00000363010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000200000000000200000000030100190000035c01300197000000370210008c00000c060000a13d000200000001001d0d490c1d0000040f000100000001001d00000002011000390d4904010000040f0000000002010433000000000220004c00000c150000613d0000002002100039000000000302043300000361033001970000000105000029000000f804500210000000000334019f000003980330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c000000000203001900000000020020190000002103100039000000000023043500000c130000013d000200000003001d0d4903e10000040f0000000002010433000000000220004c00000c150000613d0000000202000029000000f802200210000000200310003900000000040304330000036104400197000000000242019f0000038b0220004100000000002304350000000200000005000000000001042d00000363010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f00000080021002700000037e0310009c000000000201a0190000037e0110009c0000000001000019000000100100203900000008031001bf0000035c0420009c000000000103201900000040032002700000035c0420009c000000000203201900000004031001bf000003540420009c00000000010320190000002003200270000003540420009c000000000203201900000002031001bf0000ffff0420008c000000000103201900000010032002700000000002032019000000ff0220008c000000000200001900000001020020390000000001210019000000000001042d00000040021000390000039903000041000000000032043500000020021000390000001a030000390000000000320435000000200200003900000000002104350000006001100039000000000001042d000000e0031000390000000202000367000000000332034f000000000303043b0000035d0330019800000c4d0000613d0000012001100039000000000112034f000000000101043b00000c620000013d000000a003100039000000000332034f0000006004100039000000000442034f000000000404043b000000000503043b00000000635400a9000000000650004c00000c590000613d00000000655300d9000000000445004b00000c630000c13d0000012001100039000000000112034f000000000201043b0000000001320019000000000221004b00000000020000190000000102004039000000010220019000000c630000c13d000000000001042d00000363010000410000000000100435000000110100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000000110004c00000c6e0000613d000000000001042d0000004001000039000000000101043300000044021000390000039a0300004100000000003204350000002402100039000000080300003900000000003204350000036702000041000000000021043500000004021000390000002003000039000000000032043500000064020000390d4902a00000040f00050000000000020000035d0a1001970000004009000039000000000b0904330000037a01b0009c00000cfb0000813d0000004001b0003900000000001904350000002001000039000400000001001d00000000001b04350000002003b000390000039b01000041000300000003001d0000000000130435000000010100003900000000050004140000000403a0008c00000c9d0000613d00000000040204330000002003200039000000000105001900000000020a001900000000050000190000000006000019000500000009001d00020000000a001d00010000000b001d0d4902150000040f000000010b000029000000020a0000290000000509000029000000600c0000390000000102000031000000000320004c00000cd00000613d0000035c0320009c00000cfb0000213d0000003f03200039000000200400008a0000
00000343016f000000000c09043300000000033c00190000000004c3004b000000000400001900000001040040390000035c0530009c00000cfb0000213d000000010440019000000cfb0000c13d000000000039043500000000002c04350000002002c00039000000030300036700000001050000310000001f0450018f0000000505500270000000000650004c00000cc10000613d000000000600001900000005076002100000000008720019000000000773034f000000000707043b00000000007804350000000106600039000000000756004b00000cb90000413d000000000640004c00000cd00000613d0000000505500210000000000353034f00000000025200190000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f000000000032043500000000020c0433000000000110004c00000d060000613d000000000120004c00000ce50000c13d00030000000c001d000500000009001d0000039c01000041000000000010043900000004010000390000000000a10439000080020100003900000024020000390d4902840000040f000000000110004c00000d300000613d000000030c00002900000000020c0433000000000120004c000000050900002900000cf90000613d0000035b01000041000000200320008c000000000300001900000000030140190000035b02200197000000000420004c000000000100a0190000035b0220009c000000000103c019000000000110004c00000d030000c13d0000002001c000390000000001010433000000000210004c0000000002000019000000010200c039000000000221004b00000d030000c13d000000000110004c00000d1d0000613d0000000500000005000000000001042d00000363010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190d4902a00000040f000000000100001900000000020000190d4902a00000040f000000000120004c00000d2e0000c13d0000000004090433000500000004001d0000036701000041000000000014043500000004014000390000000402000029000000000021043500000000030b0433000400000003001d00000024014000390000000000310435000000440240003900000003010000290d490b3a0000040f00000004010000290000001f01100039000000200200008a000000000121016f000000440210003900000005010000290d4902a00000040f000000000109043300000064021000390000039d03000041000000000032043500000044021000390000039e03000041000000000032043500000024021000390000002a0300003900000000003204350000036702000041000000000021043500000004021000390000000403000029000000000032043500000084020000390d4902a00000040f0000002001c000390d4902a00000040f0000000501000029000000000101043300000044021000390000039f03000041000000000032043500000024021000390000001d0300003900000000003204350000036702000041000000000021043500000004021000390000000403000029000000000032043500000064020000390d4902a00000040f00000d42002104210000000102000039000000000001042d0000000002000019000000000001042d00000d47002104230000000102000039000000000001042d0000000002000019000000000001042d00000d490000043200000d4a0001042e00000d4b00010430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000202bcce700000000000000000000000000000000000000000000000000000000a28c1aee00000000000000000000000000000000000000000000000000000000df9c158900000000000000000000000000000000000000000000000000000000e2f318e300000000000000000000000000000000000000000000000000000000eeb8cb098000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000ffffffffffffffffffffffffffffffffffffffffe1239cd800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff9f9cc7f708afc65944829bd487b90b72536b1951864fbfc14e125fc97
2a6507f3900ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1b000000000000000000000000000000000000000000000000000000000000004e487b7100000000000000000000000000000000000000000000000000000000ffffffff00000000000000000000000000000000000000000000000000000000949431dc000000000000000000000000000000000000000000000000000000008c5a34450000000000000000000000000000000000000000000000000000000008c379a000000000000000000000000000000000000000000000000000000000dd62ed3e00000000000000000000000000000000000000000000000000000000095ea7b300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff7f20746f206e6f6e2d7a65726f20616c6c6f77616e6365000000000000000000005361666545524332303a20617070726f76652066726f6d206e6f6e2d7a65726f74206d757374206265206174206c65617374203638206279746573206c6f6e6754686520617070726f76616c4261736564207061796d617374657220696e707574206265206174206c656173742034206279746573206c6f6e67000000000000546865207374616e64617264207061796d617374657220696e707574206d7573020000000000000000000000000000000000000000000000000000000000000002000002000000000000000000000000000000000000000000000000000000007261746f720000000000000000000000000000000000000000000000000000004661696c656420746f20706179207468652066656520746f20746865206f7065496e76616c69642063616c6c20746f20436f6e74726163744465706c6f79657275650000000000000000000000000000000000000000000000000000000000004e6f7420656e6f7567682062616c616e636520666f7220666565202b2076616c0000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000ffffffffffffff80000000000000000000000000000000000000000000000000ffffffffffffffc0000000000000000000000000000000000000000000000000ffffffffffffffa0202bcce700000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000010000000000000000ffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000ffffffff000000000000000000000000000000000000000000000000ffffffff00000000000000000000000001000000000000000000000000000000000000000000000000000000000000005369676e6174757265206c656e67746820697320696e636f72726563740000007fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a1496e76616c69642073000000000000000000000000000000000000000000000076206973206e656974686572203237206e6f7220323800000000000000000000456e636f64696e6720756e737570706f727465642074780000000000000000009a8a0592ac89c5ad3bc6df8224c17b485976f597df104ee20d0df415241f670bc00000000000000000000000000000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff848e1bfa1ac4e3576b728bda6721b215c70a7799a5b4866282a71bab954baac8000000000000000000000000000000000000000000000000fffffffffffffe1fad7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a519b453ce45aaaaf3a300f5a9ec95869b4f28ab10430b572ee218c3a6a5e07d6fc2f8787176b8ac6bf7215b4adcc1e069bf4ab82d9ab1df05a57a91d425935b6e000000000000000000000000000000000000000000000000ffffffffffffff5f1901000000000000000000000000000000000000000000000000000000000000808000000000000000000000000000000000000000000000000000000000000081000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000f8000000000000000000000000
00000000000000000000000000000000000000556e737570706f72746564207061796d617374657220666c6f770000000000004f766572666c6f770000000000000000000000000000000000000000000000005361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65641806aa1896bbf26568e884a7374b41e002500962caba6a15023a8d90e8508b836f742073756363656564000000000000000000000000000000000000000000005361666545524332303a204552433230206f7065726174696f6e20646964206e416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000000000000000000000000000000000000000000000000000000000000000000000", + "predeployed_contracts": { + "0xdeadbeafdeadbeafdeadbeafdeadbeafdeadbeaf": "0x00020000000000020001000000000002000000000301001900000060033002700000002b033001970001000000310355000000000000001f0000008001000039000000400a00003900000000001a04350000000102200190000000190000c13d000000300200004100000000002104350000002002000039000000840300003900000000002304350000001d02000039000000a40300003900000000002304350000003202000041000000c4030000390000000000230435000000640200003900a6009a0000040f0000000001000416000000000110004c000000390000c13d000000010100003900000000030004140000000002000410000000040420008c000000250000613d000000000103001900010000000a001d00a6008d0000040f000000010a00002900000060020000390000000003000031000000000430004c0000003c0000c13d000000000110004c000000760000c13d00000000010a043300000044021000390000003103000041000000000032043500000024021000390000001303000039000000000032043500000030020000410000000000210435000000040210003900000020030000390000000000320435000000640200003900a6009a0000040f0000000001000019000000000200001900a6009a0000040f0000002c0230009c0000006e0000813d0000001f02300039000000200400008a000000000242016f0000003f02200039000000000442016f00000000020a04330000000004420019000000000524004b000000000500001900000001050040390000002d0640009c0000006e0000213d00000001055001900000006e0000c13d00000000004a043500000000003204350000002003200039000000010400036700000000060000310000001f0560018f0000000506600270000000000760004c0000005e0000613d000000000700001900000005087002100000000009830019000000000884034f000000000808043b00000000008904350000000107700039000000000867004b000000560000413d000000000750004c000000290000613d0000000506600210000000000464034f00000000036300190000000305500210000000000603043300000000065601cf000000000656022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000464019f0000000000430435000000290000013d0000002e0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001900a6009a0000040f0000000001020433000000000110004c0000007f0000c13d0000002001000039000001000200003900000000001204390000012001000039000000000001043900a600980000040f00000000010a043300000044021000390000002f03000041000000000032043500000024021000390000001403000039000000000032043500000030020000410000000000210435000000040210003900000020030000390000000000320435000000640200003900a6009a0000040f0000002b030000410000002b0410009c0000000001038019000000c00110021000a600a10000040f000000000301001900000060033002700000002b0030019d0001000000010355000000010120018f000000000001042d0000003301000041000000a70001042e0000002b030000410000002b0410009c000000000103801900000040011002100000006002200210000000000121019f000000a800010430000000a4002104210000000102000039000000000001042d0000000002000019000000000001042d000000a600000432000000a70001042e000000a80001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000
000000ffffffffffffffff4e487b7100000000000000000000000000000000000000000000000000000000646174612073686f756c6420626520656d70747900000000000000000000000008c379a00000000000000000000000000000000000000000000000000000000063616c6c2073686f756c6420737563636565640000000000000000000000000066616c6c6261636b2073686f756c64206e6f742062652063616c6c656400000000000002000000000000000000000000000000400000010000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008002": "0x0004000000000002000400000000000200000000030100190000006003300270000000630430019700030000004103550002000000010355000000630030019d000100000000001f0000008006000039000000400500003900000000006504350000000101200190000000460000c13d0000000001000031000000040110008c000001150000413d0000000201000367000000000101043b000000e001100270000000650210009c000000510000613d000000660210009c0000009e0000613d000000670210009c000000ba0000613d000000680210009c000000dd0000613d000000690110009c000001150000c13d0000000001000416000000000110004c000001150000c13d000000040100008a00000000011000310000006a02000041000000200310008c000000000300001900000000030240190000006a01100197000000000410004c000000000200a0190000006a0110009c00000000010300190000000001026019000000000110004c000001150000c13d00000004010000390000000201100367000000000101043b0000006b01100197000400000006001d018701850000040f0000006f02100197000000700220009c00000000020000190000000102006039000000000310004c0000000003000019000000010300603900000000022301a0000000db0110027000000073011001970000000002010019000000000200c0190000000401000029000000000021043500000020020000390000000003000019018701620000040f0000000001000416000000000110004c000001150000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000006403000041018701620000040f0000000001000416000000000110004c000001150000c13d000000040100008a00000000011000310000006a02000041000000200310008c000000000300001900000000030240190000006a01100197000000000410004c000000000200a0190000006a0110009c00000000010300190000000001026019000000000110004c000001150000c13d0000000401000039000200000001001d0000000201100367000000000101043b0000006b01100197000300000001001d000400000005001d018701850000040f000000040200002900000000020204330000000003010019000000000130004c000001260000c13d000100000003001d0000006c010000410000000000120435000000040120003900000003030000290000000000310435000000000100041400000020040000390000000003020019000300000002001d0187012f0000040f000000000110004c000001020000c13d000000030200036700000001040000310000001f0340018f000000040100002900000000010104330000000504400270000000000540004c0000008d0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000000850000413d000000000530004c0000009c0000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310187016c0000040f0000000001000416000000000110004c000001150000c13d000000040100008a00000000011000310000006a02000041000000200310008c000000000300001900000000030240190000006a01100197000000000410004c000000000200a0190000006a0110009c00000000010300190000000001026019000000000110004c000001150000c13d000400000005001d018701750000040f0000006b01100197018701850000040f000000040200002900000000030204330000000000130435000000200200003900000000010300190000000003000019018701620000040f0000000001000416000000000110004c000001150000c13d0000000
40100008a00000000011000310000006a02000041000000400310008c000000000300001900000000030240190000006a01100197000000000410004c000000000200a0190000006a0110009c00000000010300190000000001026019000000000110004c000001150000c13d00000002010003670000000402100370000000000202043b0000006b0320009c000001150000213d0000002401100370000000000101043b0000006f031001970000000004000411000080060440008c000001150000c13d000000700330009c000001150000c13d018701830000040f000000000100001900000000020000190000000003000019018701620000040f0000000001000416000000000110004c000001150000c13d000000040100008a00000000011000310000006a02000041000000200310008c000000000300001900000000030240190000006a01100197000000000410004c000000000200a0190000006a0110009c00000000010300190000000001026019000000000110004c000001150000c13d00000004010000390000000201100367000000000101043b0000006b0210009c000001150000213d0000000002000411000080060220008c000001150000c13d000400000001001d018701850000040f0000006f02100197000000700220009c0000000402000029000001150000c13d0000007201100197018701830000040f000000000100001900000000020000190000000003000019018701620000040f0000000102000031000000200120008c000000200100003900000000010240190000001f01100039000000600310018f00000003050000290000000001530019000000000331004b000000000300001900000001030040390000006d0410009c0000000404000029000001180000213d0000000103300190000001180000c13d0000000000140435000000200220008c000001200000813d000000000100001900000000020000190187016c0000040f00000071010000410000000000100435000000410100003900000002020000290000000000120435000000240200003900000000010000190187016c0000040f0000006e040000410000000003050433000000000330004c000000000201001900000001030000290000012b0000c13d0000006f013001970000006e04000041000000700110009c000000000403c0190000000001020019000000000041043500000020020000390000000003000019018701620000040f0002000000000002000200000004001d000100000003001d0000006303000041000000630420009c0000000002038019000000630410009c0000000001038019000000c0011002100000004002200210000000000112019f00000074011001c700008003020000390187017e0000040f000000010800002900000002040000290000001f0340018f0000000504400270000000000540004c0000014c0000613d000000000500001900000005065002100000000007680019000000000661034f000000000606043b00000000006704350000000105500039000000000645004b000001440000413d000000010220018f000000000530004c0000015c0000613d0000000504400210000000000541034f00000000044800190000000303300210000000000604043300000000063601cf000000000636022f000000000505043b0000010003300089000000000535022f00000000033501cf000000000363019f000000000034043500030000000103550000006001100270000100630010019d00000000010200190000000200000005000000000001042d0000006304000041000000630510009c000000000104801900000040011002100000000001310019000000630320009c000000000204801900000060022002100000000001210019000001880001042e0000006303000041000000630420009c0000000002038019000000630410009c000000000103801900000040011002100000006002200210000000000112019f000001890001043000000004010000390000000201100367000000000101043b000000750210009c0000017b0000813d000000000001042d000000000100001900000000020000190187016c0000040f00000181002104230000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d0000018700000432000001880001042e00000189000104300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e03fe177000000000000000000000000000000000000000000000000000000004d
e2e468000000000000000000000000000000000000000000000000000000004f1e1be000000000000000000000000000000000000000000000000000000000c2e4ff97000000000000000000000000000000000000000000000000000000001806aa188000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff5aa9b6b500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47000ff00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000004e487b7100000000000000000000000000000000000000000000000000000000ff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000001fffe0000000000000000000000000000000000000002400000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000800b": "0x0002000000000002000300000000000200010000000103550000006001100270000000cd0010019d00000080010000390000004005000039000000000015043500000001012001900000004d0000c13d0000000001000031000000040110008c000002610000413d0000000101000367000000000101043b000000e001100270000000d20210009c0000009a0000613d000000d30210009c000000650000613d000000d40210009c000000b50000613d000000d50210009c0000007f0000613d000000d60210009c000000e50000613d000000d70210009c000001000000613d000000d80210009c000001190000613d000000d90210009c0000014b0000613d000000da0210009c0000016f0000613d000000db0210009c0000018b0000613d000000dc0210009c000001a60000613d000000dd0210009c000001d60000613d000000de0210009c000001f10000613d000000df0210009c0000020d0000613d000000e00210009c000002330000613d000000e10210009c000002500000613d000000e20110009c000002610000c13d0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000501000039000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000ce010000410000000302000039032f032b0000040f0000000401000039000300000001001d032f032d0000040f000000cf0110019700008001011001bf0000000302000029032f032b0000040f000000d0010000410000000502000039032f032b0000040f000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000000d103000041032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d000300000005001d032f02df0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000601000039000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000
001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000201000039000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000600310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d00000001010003670000002402100370000000000302043b0000000002000411000080010220008c000002610000c13d0000008002300210000000000430004c000000d20000613d00000000433200d9000000e90330009c000001430000c13d0000000401100370000000000301043b0000000001230019000000000231004b000000000200001900000001020040390000000102200190000001430000c13d0000000702000039032f032b0000040f00000044010000390000000101100367000000000101043b0000000602000039032f032b0000040f000000000100001900000000020000190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000301000039000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d000300000005001d032f02df0000040f00000003010000290000000001010433000000000021043500000020020000390000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000200310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d000300000005001d00000004010000390000000101100367000000000101043b000200000001001d000000e8010000410000000000100439032f02b40000040f0000000202000029000000000121004b00000000010000190000027a0000413d000000e801000041000100000001001d0000000000100439032f02b40000040f0000000102000029000000000020043900000002020000290000000001210049000100000001001d032f02b40000040f0000000102000029000000000112004b0000026f0000a13d000000ea01000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019032f02cb0000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000200310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d00000004010000390000000101100367000000000101043b000000000010043500000008010000390000002002000039000200000002001d00000000001204350000000001000019000300000005001d032f02a10000040f032f032d0000040f000000030200002900000000020204330000000000120435000000000102001900000002020000290000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301
100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000101000039000300000005001d032f032d0000040f00000003020000290000000003020433000000e4011001970000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000001000019000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000800310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d00000001010003670000002402100370000000000202043b0000000401100370000000000301043b0000000001000411000080010110008c000002610000c13d000100000003001d000300000005001d0000000701000039000200000002001d032f032d0000040f0000000203000029000000e502100197000000000223004b000002810000813d000000030100002900000000010104330000004402100039000000e6030000410000000000320435000000e7020000410000000000210435000000240210003900000020030000390000000000320435000000040210003900000000003204350000006402000039032f02cb0000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000701000039000300000005001d032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000000310004c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000401000039000300000005001d032f032d0000040f00000003020000290000000003020433000000e4011001970000000000130435000000200200003900000000010300190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000200310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d00000004010000390000000101100367000000000201043b000000e40120009c000002610000213d0000000001000411000080010110008c000002610000c13d0000000101000039000300000001001d000200000002001d032f032d0000040f000000cf011001970000000202000029000000000121019f0000000302000029032f032b0000040f000000000100001900000000020000190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e302000041000000200310008c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002610000c13d0000000001000411000080010110008c000002610000c13d00000004010000390000000101100367000000000101043b0000000202000039032f032b0000040f000000000100001900000000020000190000000003000019032f02c30000040f0000000001000416000000000110004c000002610000c13d000000040100008a0000000001100031000000e3020000410000000003100
04c00000000030000190000000003024019000000e301100197000000000410004c000000000200a019000000e30110009c00000000010300190000000001026019000000000110004c000002640000613d00000000010000190000000002000019032f02cb0000040f000300000005001d032f02df0000040f00000003050000290000000003050433000000200430003900000000002404350000000000130435000000000103001900000000020500190000000003000019032f02c30000040f000001010120008c000000000100001900000002020000290000027a0000813d00000000002004350000000801000039000000200200003900000000001204350000000001000019032f02a10000040f032f032d0000040f000000030200002900000000030204330000000000130435000000200200003900000000010300190000000003000019032f02c30000040f00000044020000390000000102200367000000000202043b0000008001100270000300000001001d0000000101100039000000000121004b00000000010000190000000101006039032f03010000040f0000000301000029032f02d20000040f00000000020100190000000101000029032f032b0000040f0000000301000029032f02e50000040f032f03160000040f0000000202000029032f02f20000040f032f02d90000040f00000064010000390000000101100367000000000101043b032f02dc0000040f00000002010000290000000102000029000000000021041c000000000100001900000000020000190000000003000019032f02c30000040f000000cd020000410000000003000414000000cd0430009c0000000003028019000000cd0410009c00000000010280190000004001100210000000c002300210000000000112019f000000eb011001c70000801002000039032f03260000040f0000000102200190000002b10000613d000000000101043b000000000001042d00000000010000190000000002000019032f02cb0000040f000000cd010000410000000002000414000000cd0320009c0000000001024019000000c001100210000000ec011001c70000800b02000039032f03260000040f0000000102200190000002c00000613d000000000101043b000000000001042d00000000010000190000000002000019032f02cb0000040f000000cd04000041000000cd0510009c000000000104801900000040011002100000006002200210000000000121019f0000000001310019000003300001042e000000cd03000041000000cd0410009c000000000103801900000040011002100000006002200210000000000121019f000003310001043000000000001004350000000801000039000000200200003900000000001204350000000001000019032f02a10000040f000000000001042d0000000702000039032f032b0000040f000000000001042d0000000602000039032f032b0000040f000000000001042d0000000701000039032f032d0000040f0000008003100270000000e5021001970000000001030019000000000001042d000000010200008a000000000221004b000002ea0000613d0000000101100039000000000001042d000000ea01000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019032f02cb0000040f0000000001120019000000000221004b000000000200001900000001020040390000000102200190000002f90000c13d000000000001042d000000ea01000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019032f02cb0000040f000000000110004c000003040000613d000000000001042d000000400100003900000000010104330000006402100039000000ed0300004100000000003204350000004402100039000000ee030000410000000000320435000000240210003900000028030000390000000000320435000000e70200004100000000002104350000000402100039000000200300003900000000003204350000008402000039032f02cb0000040f00000000020100190000008001200210000000000320004c0000031d0000613d00000000322100d9000000e90220009c0000031e0000c13d000000000001042d000000ea01000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019032f02cb0000040f00000329002104230000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d0000032f00000432000003300001042e0000033100010430000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000040000000ffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008e1bc9bf04000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fe173b970000000000000000000000000000000000000000000000000000000042cbb15c000000000000000000000000000000000000000000000000000000004d59979f000000000000000000000000000000000000000000000000000000006ef25c3a000000000000000000000000000000000000000000000000000000007877a79700000000000000000000000000000000000000000000000000000000796b89b90000000000000000000000000000000000000000000000000000000080b412460000000000000000000000000000000000000000000000000000000085df51fd00000000000000000000000000000000000000000000000000000000938b5f32000000000000000000000000000000000000000000000000000000009a8a0592000000000000000000000000000000000000000000000000000000009e830ad300000000000000000000000000000000000000000000000000000000a0803ef700000000000000000000000000000000000000000000000000000000a6ae0aac00000000000000000000000000000000000000000000000000000000a851ae7800000000000000000000000000000000000000000000000000000000bf1fe42000000000000000000000000000000000000000000000000000000000d4a4ca0d0000000000000000000000000000000000000000000000000000000019cae4628000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000ffffffffffffffffffffffffffffffff54696d657374616d70732073686f756c6420626520696e6372656d656e74616c08c379a00000000000000000000000000000000000000000000000000000000042cbb15ccdc3cad6266b0e7a08c0454b23bf29dc2df74b6f3c209e9336465bd100000000000000000000000000000001000000000000000000000000000000004e487b71000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000040000000000000000000000000020000020000000000000000000000000000000400000000000000000000000020636f72726563740000000000000000000000000000000000000000000000005468652070726f766964656420626c6f636b206e756d626572206973206e6f74", + "0x0000000000000000000000000000000000008006": 
"0x0005000000000002000900000000000200000000030100190000006003300270000003050430019700040000004103550003000000010355000003050030019d000200000002001f000100000000001f0000008001000039000000400800003900000000001804350000000102200190000000550000c13d0000000002000031000000040320008c000003990000413d0000000303000367000000000403043b000000e006400270000003070460009c000000040520008a0000000404300370000000000a000411000001f40000613d000003080760009c000400000008001d000000600000613d000003090760009c000002540000613d0000030a0760009c000002810000613d0000030b0760009c000000a30000613d0000030c0760009c000002c50000613d0000030d0760009c000000ef0000613d0000030e0760009c000002f60000613d0000030f0760009c000001310000613d000003100160009c000001700000613d000003110160009c000001b10000613d000003120160009c000003990000c13d0000000001000416000000000110004c000003990000c13d000000040100008a00000000011000310000031302000041000000400310008c000000000300001900000000030240190000031301100197000000000410004c000000000200a019000003130110009c00000000010300190000000001026019000000000110004c000003990000c13d000000030100036700080000000103530000000401100370000000000101043b000900000001001d0c0f059b0000040f000000080100035f0000002401100370000000000201043b00000009010000290c0f079e0000040f00000004020000290000000003020433000003150110019700000000001304350000002002000039000000000103001900000000030000190c0f05880000040f0000000001000416000000000110004c000003990000c13d00000020020000390000010001000039000000000021043900000120020000390000000000020439000000400200003900000306030000410c0f05880000040f00090000000a001d00000000010200190c0f05bd0000040f000300000001001d000500000002001d000700000003001d000600000004001d000000020100003900000002021001870000000101200270000000000220004c000000700000c13d0000000901000029000003180110009c000000000100001900000001010040390c0f06c20000040f00000004010000290000000003010433000003190100004100000000001304350000000401300039000000090200002900000000002104350000000001000414000080030200003900000024040000390000002006000039000800000003001d00000000050300190c0f050a0000040f0000000104000031000000000110004c000003190000c13d00000004030003670000001f0240018f000000040100002900000000010104330000000504400270000000000540004c000000920000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b0000008a0000413d000000000520004c000000a10000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f0000000001000416000000000110004c000003990000c13d000000040100008a00000000011000310000031302000041000000200310008c000000000300001900000000030240190000031301100197000000000410004c000000000200a019000003130110009c00000000010300190000000001026019000000000110004c000003990000c13d00000004010000390000000301100367000000000101043b000900000001001d0c0f059b0000040f0c0f06250000040f00000009010000290000031501100197000000000010043500000020010000390000000000010435000000000100001900000004020000290c0f05620000040f000800000001001d00000004010000290000000001010433000900000001001d0c0f06170000040f00000008010000290c0f0c0d0000040f000800000001001d000000ff0110018f000700000001001d0c0f060c0000040f00000009010000290000000702000029000000000021043500000008010000290000000801100270000000ff0110018f000700000001001d0c0f060c0000040f00000009010000290000002002100039000800000002001d000000070300002900000000003204350000000002010433000700000002001d00000004010000290000000001010433
000900000001001d00000000010200190c0f060c0000040f00000009010000290000000702000029000000000021043500000008010000290000000001010433000800000001001d0c0f060c0000040f0000000901000029000000200210003900000008030000290000000000320435000000040200002900000000030000190c0f05880000040f00090000000a001d00000000010200190c0f05bd0000040f000500000002001d000700000003001d000600000004001d000000020100003900000002021001870000000101200270000000000220004c000000fe0000c13d0000000901000029000003180110009c000000000100001900000001010040390c0f06c20000040f00000004010000290000000003010433000003190100004100000000001304350000000401300039000000090200002900000000002104350000000001000414000080030200003900000024040000390000002006000039000800000003001d00000000050300190c0f050a0000040f0000000104000031000000000110004c0000033f0000c13d00000004030003670000001f0240018f000000040100002900000000010104330000000504400270000000000540004c000001200000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000001180000413d000000000520004c0000012f0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031306000041000000200750008c000000000700001900000000070640190000031305500197000000000850004c000000000600a019000003130550009c00000000050700190000000005066019000000000550004c000003990000c13d000000000404043b000003140540009c000003990000213d00000023054000390000031306000041000000000725004b0000000007000019000000000706801900000313082001970000031305500197000000000985004b0000000006008019000000000585013f000003130550009c00000000050700190000000005066019000000000550004c000003990000c13d0000000405400039000000000353034f000000000b03043b0000031403b0009c000003990000213d00000024044000390000000503b00210000600000004001d0000000003430019000000000223004b000003990000213d0000800702a0008c000003b00000c13d0000000002000019000000000300001900070000000b001d0000000001b3004b000003bf0000813d0000000601000029000800000002001d00000000020b0019000900000003001d00000009030000290c0f07ba0000040f00000009030000290000000802000029000000070b00002900000060011000390000000301100367000000000101043b000000000221001900000001033000390000015f0000013d0000000001000416000000000110004c000003990000c13d000000040100008a00000000011000310000031302000041000000200310008c000000000300001900000000030240190000031301100197000000000410004c000000000200a019000003130110009c00000000010300190000000001026019000000000110004c000003990000c13d00000004030000390000000301300367000000000201043b000000010120008c000003990000213d000800000003001d000700000002001d0000000101000039000000020200003900000002022001880000018f0000c13d0000031801a0009c0000000001000019000000010100403900090000000a001d0c0f06c20000040f0000000901000029000000000010043500000020010000390000000000010435000000000100001900000004020000290c0f05620000040f000000040300002900000000040304330000031a0240009c000004870000213d00000040024000390000000000230435000600000004001d0c0f0c0d0000040f0000000604000029000000ff0210018f000000020320008c000001a90000813d00000000002404350000000801100270000000ff0110018f000000010210008c0000048f0000a13d0000031b010000410000000000100435000000210100003900000008020000290000000000120435000000240200003900000000010000190c0f05920000040f00090000000a001d00000000010200190c0f05e20000040f000500000002001d000700000003001d000600000004001d000300000005001d000000020100003900000002021001870000000101200270000000000220004c000
001c10000c13d0000000901000029000003180110009c000000000100001900000001010040390c0f06c20000040f00000004010000290000000003010433000003190100004100000000001304350000000401300039000000090200002900000000002104350000000001000414000080030200003900000024040000390000002006000039000800000003001d00000000050300190c0f050a0000040f0000000104000031000000000110004c000003620000c13d00000004030003670000001f0240018f000000040100002900000000010104330000000504400270000000000540004c000001e30000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000001db0000413d000000000520004c000001f20000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031301000041000000400250008c000000000200001900000000020140190000031306500197000000000760004c000000000100a019000003130660009c000000000102c019000000000110004c000003990000c13d000000000604043b000003140160009c000003990000213d00000000016500490000031302000041000000a00410008c000000000400001900000000040240190000031301100197000000000510004c000000000200a019000003130110009c00000000010400190000000001026019000000000110004c000003990000c13d0000002401300370000000000201043b000003150120009c000003990000213d000000000100041000000000011a004b000003990000c13d0000000401600039000500000001001d000000000113034f000000000101043b000800000001001d000700000002001d000900000006001d0c0f09980000040f000000090100002900000024031000390000000301300367000000000101043b000003150210009c000003990000213d0000000802000029000400000003001d000600000001001d0c0f09fa0000040f0c0f06250000040f000000000201001900000020012000390000000000010435000000000002043500000006010000290c0f07050000040f000000090700002900000006020000290000000404000029000000070500002900000044037000390000000301000367000000000331034f000000000603043b000000000360004c0000000003000019000000010300c039000000000336004b000003990000c13d0000000803000029000000000360004c000004b70000c13d000000000141034f000000000701043b000003150170009c0000000806000029000003990000213d00000305010000410000000002000414000003050320009c0000000001024019000000c00110021000000316011001c70000800d02000039000000040300003900000317040000410c0f0bfb0000040f0000000101200190000003990000613d0000000001000019000000000200001900000000030000190c0f05880000040f0000000001000416000000000110004c000003990000c13d000000040100008a00000000011000310000031302000041000000200310008c000000000300001900000000030240190000031301100197000000000410004c000000000200a019000003130110009c00000000010300190000000001026019000000000110004c000003990000c13d00000004010000390000000301100367000000000201043b000000010120008c000003990000213d000800000002001d000000010100003900000002020000390000000202200188000002720000c13d0000031801a0009c0000000001000019000000010100403900090000000a001d0c0f06c20000040f0000000901000029000000000010043500000020010000390000000000010435000000400200003900000000010000190c0f05620000040f00000008020000290c0f06d70000040f0000000001000019000000000200001900000000030000190c0f05880000040f00090000000a001d00000000010200190c0f05e20000040f000200000001001d000500000002001d000700000003001d000600000004001d000300000005001d000000020100003900000002021001870000000101200270000000000220004c000002920000c13d0000000901000029000003180110009c000000000100001900000001010040390c0f06c20000040f0000000401000029000000000301043300000319010000410000000000130435000000040130003900000009020000290000000000210435000000
0001000414000080030200003900000024040000390000002006000039000800000003001d00000000050300190c0f050a0000040f0000000104000031000000000110004c000003860000c13d00000004030003670000001f0240018f000000040100002900000000010104330000000504400270000000000540004c000002b40000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000002ac0000413d000000000520004c000002c30000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f0000000001000416000000000110004c000003990000c13d0000000002000031000000040120008a0000031303000041000000800410008c000000000400001900000000040340190000031301100197000000000510004c000000000300a019000003130110009c00000000010400190000000001036019000000000110004c000003990000c13d00000003010003670000000403100370000000000403043b000003150340009c000003990000213d0000006401100370000000000101043b000003140310009c000003990000213d0000000401100039000900000004001d0c0f05a10000040f00000003030003670000002404300370000000000404043b0000004403300370000000000303043b0000000005010019000000000602001900000009010000290000000002040019000000000405001900000000050600190c0f076f0000040f00000004020000290000000003020433000003150110019700000000001304350000002002000039000000000103001900000000030000190c0f05880000040f0000000001000416000000000110004c000003990000c13d000000040100008a00000000011000310000031302000041000000200310008c000000000300001900000000030240190000031301100197000000000410004c000000000200a019000003130110009c00000000010300190000000001026019000000000110004c000003990000c13d00000004010000390000000301100367000000000101043b000900000001001d0c0f059b0000040f00000009010000290c0f06430000040f00000004020000290000000002020433000900000002001d000800000001001d0c0f060c0000040f000000090100002900000008020000290000000000210435000000200200003900000000030000190c0f05880000040f000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000008010000290000000001120019000000000221004b00000000020000190000000102004039000003140310009c0000000406000029000004870000213d0000000102200190000004870000c13d0000000000160435000000200140008c0000000901000029000003990000413d00000005020000290000000303000029000000070400002900000006050000290c0f076f0000040f0000000002010019000900000002001d0000000501000029000000070300002900000006040000290c0f08280000040f00000009010000290000031502100197000000040100002900000000010104330000000000210435000000200200003900000000030000190c0f05880000040f000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000008050000290000000001520019000000000221004b00000000020000190000000102004039000003140310009c0000000403000029000004870000213d0000000102200190000004870000c13d0000000000130435000000200140008c0000000901000029000003990000413d00000000020504330c0f079e0000040f0000000002010019000900000002001d0000000501000029000000070300002900000006040000290c0f08280000040f00000009010000290000031502100197000000040100002900000000010104330000000000210435000000200200003900000000030000190c0f05880000040f000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000008050000290000000001520019000000000221004b00000000020000190000000102004039000003140310009c0000000403000029000004870000213d0000000102200190000004870000c13d0000000000130435000000200140008c0000000901000029000003990000413d00000000020504330c0f079e0000040f0000000002010019000900000
002001d00000005010000290000000303000029000000070400002900000006050000290c0f08da0000040f00000009010000290000031502100197000000040100002900000000010104330000000000210435000000200200003900000000030000190c0f05880000040f000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000008010000290000000001120019000000000221004b00000000020000190000000102004039000003140310009c0000000406000029000004870000213d0000000102200190000004870000c13d0000000000160435000000200140008c00000009010000290000039c0000813d000000000100001900000000020000190c0f05920000040f00000005020000290000000203000029000000070400002900000006050000290c0f076f0000040f0000000002010019000900000002001d00000005010000290000000303000029000000070400002900000006050000290c0f08da0000040f00000009010000290000031502100197000000040100002900000000010104330000000000210435000000200200003900000000030000190c0f05880000040f0000031c0200004100000000002104350000002003000039000000840200003900000000003204350000002d03000039000000a40400003900000000003404350000031d03000041000000c40400003900000000003404350000031e03000041000000e40400003900000000003404350c0f05920000040f0000000001000416000000000121004b000004a20000c13d0000000001000410000800000001001d0000000401000039000200000001001d0000000001000019000900000001001d0000000001b1004b000002500000813d000000060100002900000000020b001900000009030000290c0f07ba0000040f00000060011000390000000301100367000000000101043b000300000001001d0000000601000029000000070200002900000009030000290c0f07ba0000040f00000322020000410000000000200439000000080200002900000002030000290000000000230439000500000001001d0c0f05790000040f000000050d000029000000070b000029000000040a000029000000000110004c000003990000613d000000000c0a0433000003230100004100000000001c04350000000402c0003900000000010004140000000000a204350000000302d00367000000000202043b0000004403c0003900000000002304350000002002d000390000000302200367000000000202043b000003150320009c000003990000213d0000006403c0003900000000002304350000004002d000390000000302200367000000000202043b000000000320004c0000000003000019000000010300c039000000000332004b000003990000c13d0000008403c0003900000000002304350000006002d000390000000302200367000000000202043b000000a403c00039000000000023043500000000030000310000000002d300490000008004d000390000001f0520008a0000000302000367000000000442034f000000000404043b0000031306000041000000000754004b0000000007000019000000000706801900000313055001970000031308400197000000000958004b0000000006008019000000000558013f000003130550009c00000000050700190000000005066019000000000550004c000003990000c13d0000000004d40019000000000242034f000000000202043b000003140520009c000003990000213d000000200440003900000000032300490000031305000041000000000634004b0000000006000019000000000605201900000313033001970000031307400197000000000837004b0000000005008019000000000337013f000003130330009c00000000030600190000000003056019000000000330004c000003990000c13d000000c403c00039000000a0050000390000000000530435000000e403c0003900000000002304350000010403c0003900000003044003670000000505200270000000000650004c0000043f0000613d000000000600001900000005076002100000000008730019000000000774034f000000000707043b00000000007804350000000106600039000000000756004b000004370000413d0000001f0620018f000000000760004c0000044f0000613d0000000505500210000000000454034f00000000055300190000000306600210000000000705043300000000076701cf000000000767022f000000000404043b0000010006600089000000000464022f00000000046401cf000000000474019f0000000000450435000000000332001900000000000304350000002403c00039000080070400003900000000004304350000000803000029000000040330008c000004810000
613d0000001f02200039000000200300008a000000000232016f00000104042000390000000303000029000000000230004c00010000000c001d000004770000613d00000305020000410000030505c0009c000000000502001900000000050c40190000004006500210000003050540009c00000000040280190000006004400210000000000564019f000003050410009c0000000001028019000000c001100210000000000115019f00000316011001c70000800902000039000000080400002900000000050000190c0f0bfb0000040f00000000030100190000006003300270000103050030019d0004000000010355000000010120018f0000047c0000013d000000080200002900000000030c001900000000050c001900000000060000190c0f050a0000040f000000000110004c000000040a000029000000070b000029000000010c000029000004e90000613d0000031401c0009c000004870000213d0000000000ca043500000009010000290000000101100039000003c70000013d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f0000002002400039000800000002001d00000000001204350000000702000029000000010220015f00000000011201a0000000000100001900000001010060390c0f06ed0000040f000000080100002900000007020000290c0f06370000040f000000090100002900000006020000290c0f07050000040f0000000001000019000000000200001900000000030000190c0f05880000040f0000000401000029000000000101043300000084021000390000031f0300004100000000003204350000006402100039000003200300004100000000003204350000004402100039000003210300004100000000003204350000002402100039000000450300003900000000003204350000031c020000410000000000210435000000040210003900000020030000390000000000320435000000a4020000390c0f05920000040f0000008403700039000000000331034f000000000a00003100000000047a0049000000230440008a000000000303043b0000031309000041000000000643004b0000000006000019000000000609801900000313044001970000031307300197000000000847004b00000000080000190000000008094019000000000447013f000003130440009c00000000040600190000000004086019000000000440004c000003990000c13d00000005040000290000000003430019000000000131034f000000000401043b000003140140009c000003990000213d00000000014a004900000020033000390000031309000041000000000613004b0000000008000019000000000809201900000313011001970000031306300197000000000716004b00000000070000190000000007094019000000000116013f000003130110009c00000000010800190000000001076019000000000110004c000003990000c13d00000000010500190c0f0a510000040f0000000404000029000000070500002900000003010003670000023f0000013d000000040200036700000001040000310000001f0340018f00000000010a04330000000504400270000000000540004c000004f90000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000004f10000413d000000000530004c000005080000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310c0f05920000040f0002000000000002000200000006001d000100000005001d0000030505000041000003050630009c00000000030580190000004003300210000003050640009c00000000040580190000006004400210000000000334019f000003050410009c0000000001058019000000c001100210000000000113019f0c0f0bfb0000040f000000010800002900000002040000290000001f0340018f0000000504400270000000000540004c000005290000613d000000000500001900000005065002100000000007680019000000000661034f000000000606043b00000000006704350000000105500039000000000645004b000005210000413d000000010220018f000000000530004c000005390000613d0000000504400210000000000541034f00000000044800190000000303300210000000000604043300000000063601cf000000000636022f000000000505043b000001000330008
9000000000535022f00000000033501cf000000000363019f000000000034043500040000000103550000006001100270000103050010019d00000000010200190000000200000005000000000001042d0001000000000002000100000005001d0000030505000041000003050630009c00000000030580190000004003300210000003050640009c00000000040580190000006004400210000000000334019f000003050410009c0000000001058019000000c001100210000000000113019f0c0f0c000000040f0000000106000029000000010220018f000000000300001900000005043002100000000005460019000000000441034f000000000404043b00000000004504350000000103300039000000000430004c000000000400001900000001040060390000000104400190000005510000c13d00040000000103550000006001100270000103050010019d00000000010200190000000100000005000000000001042d0000030503000041000003050410009c00000000010380190000004001100210000003050420009c00000000020380190000006002200210000000000112019f0000000002000414000003050420009c0000000002038019000000c002200210000000000112019f00000316011001c700008010020000390c0f0c000000040f0000000102200190000005760000613d000000000101043b000000000001042d000000000100001900000000020000190c0f05920000040f00000305010000410000000002000414000003050320009c0000000001024019000000c00110021000000324011001c700008002020000390c0f0c000000040f0000000102200190000005850000613d000000000101043b000000000001042d000000000100001900000000020000190c0f05920000040f0000030504000041000003050510009c000000000104801900000040011002100000000001310019000003050320009c00000000020480190000006002200210000000000121001900000c100001042e0000030503000041000003050420009c0000000002038019000003050410009c000000000103801900000040011002100000006002200210000000000112019f00000c1100010430000003250110009c0000059e0000813d000000000001042d000000000100001900000000020000190c0f05920000040f0000001f031000390000031304000041000000000523004b0000000005000019000000000504401900000313062001970000031303300197000000000763004b000000000400a019000000000363013f000003130330009c00000000030500190000000003046019000000000330004c000005ba0000613d0000000303100367000000000303043b000003140430009c000005ba0000213d00000020011000390000000004310019000000000224004b000005ba0000213d0000000002030019000000000001042d000000000100001900000000020000190c0f05920000040f00020000000000020000000002010019000000040120008a00000313030000410000005f0410008c000000000400001900000000040320190000031301100197000000000510004c0000000003008019000003130110009c00000000010400190000000001036019000000000110004c000005df0000613d00000003030003670000004401300370000000000101043b000003140410009c000005df0000213d0000000404300370000000000404043b000200000004001d0000002403300370000000000303043b000100000003001d00000004011000390c0f05a10000040f00000000030100190000000004020019000000020100002900000001020000290000000200000005000000000001042d000000000100001900000000020000190c0f05920000040f00020000000000020000000002010019000000040120008a00000313030000410000007f0410008c000000000400001900000000040320190000031301100197000000000510004c0000000003008019000003130110009c00000000010400190000000001036019000000000110004c000006090000613d00000003010003670000002403100370000000000303043b000200000003001d0000000403100370000000000303043b000100000003001d0000004401100370000000000101043b000003140310009c000006090000213d00000004011000390c0f05a10000040f0000000003010019000000000402001900000064010000390000000301100367000000000501043b000000010150008c000006090000213d000000010100002900000002020000290000000200000005000000000001042d000000000100001900000000020000190c0f05920000040f000000020110008c0000060f0000813d000000000001042d0000031b0100004100000000001004350000002101000039000000040200003900
00000000120435000000240200003900000000010000190c0f05920000040f000003260210009c0000061d0000813d000000400110003900000040020000390000000000120435000000000001042d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f00000040020000390000000001020433000003260310009c0000062f0000813d00000040031000390000000000320435000000200210003900000000000204350000000000010435000000000001042d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000020320008c0000063b0000813d0000000000210435000000000001042d0000031b010000410000000000100435000000210100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f00030000000000020000031501100197000100000001001d0000000000100435000000200100003900000000000104350000004002000039000300000002001d00000000010000190c0f05620000040f00000003030000290000000004030433000003260240009c000006960000813d00000040024000390000000000230435000200000004001d0c0f0c0d0000040f0000000204000029000000ff0210018f000000020320008c0000068e0000813d00000000002404350000000801100270000000ff0110018f000000010210008c0000068e0000213d000000200240003900000000001204350000000002040433000000010120008c0000068e0000213d0000000101000039000000000220004c0000068c0000c13d00000003010000290000000005010433000003270100004100000000001504350000000402500039000000000100041400000001030000290000000000320435000080020200003900000024040000390000000003050019000200000005001d0c0f053f0000040f0000000104000031000000000110004c0000069e0000613d000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000002050000290000000001520019000000000221004b00000000020000190000000102004039000003140310009c0000000303000029000006960000213d0000000102200190000006960000c13d00000000001304350000001f0140008c000006bf0000a13d0000000001050433000000000110004c000000000100001900000001010060390000000300000005000000000001042d0000031b010000410000000000100435000000210100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f00000004030003670000001f0240018f000000030100002900000000010104330000000504400270000000000540004c000006ae0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000006a60000413d000000000520004c000006bd0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f000000000100001900000000020000190c0f05920000040f000000000110004c000006c50000613d000000000001042d000000400100003900000000010104330000006402100039000003280300004100000000003204350000004402100039000003290300004100000000003204350000002402100039000000240300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000084020000390c0f05920000040f0002000000000002000200000002001d000000020220008c000006e50000813d000100000001001d0c0f0c0d0000040f000001000200008a000000000121016f0000000202000029000000000121019f00000001020000290c0f0c0b0000040f0000000200000005000000000001042d0000031b010000410000000000100435000000210100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000000110004c000006f00000613d000000000001042d000000400100003900000
0000101043300000084021000390000032a03000041000000000032043500000064021000390000032b03000041000000000032043500000044021000390000032c0300004100000000003204350000002402100039000000430300003900000000003204350000031c020000410000000000210435000000040210003900000020030000390000000000320435000000a4020000390c0f05920000040f0002000000000002000200000002001d0000031501100197000000000010043500000020010000390000000000010435000000400200003900000000010000190c0f05620000040f00000002020000290000000002020433000000020320008c000007260000813d000100000001001d0c0f06d70000040f0000000101000029000000020200002900000020022000390000000002020433000200000002001d000000010220008c000007260000213d0c0f0c0d0000040f0000032d02000041000000000121016f000000020200002900000008022002100000ff000220018f000000000121019f00000001020000290c0f0c0b0000040f0000000200000005000000000001042d0000031b010000410000000000100435000000210100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f00000000040100190000032e0120009c000007640000813d0000003f01200039000000200500008a000000000651016f000000400500003900000000010504330000000006610019000000000716004b00000000070000190000000107004039000003140860009c000007640000213d0000000107700190000007640000c13d000000000065043500000000002104350000000005420019000000000335004b0000076c0000213d0000001f0520018f000000030440036700000020031000390000000506200270000000000760004c000007520000613d000000000700001900000005087002100000000009830019000000000884034f000000000808043b00000000008904350000000107700039000000000867004b0000074a0000413d000000000750004c000007610000613d0000000506600210000000000464034f00000000066300190000000305500210000000000706043300000000075701cf000000000757022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000474019f000000000046043500000000022300190000000000020435000000000001042d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000000100001900000000020000190c0f05920000040f0003000000000002000200000003001d000300000002001d000100000001001d0000000003000031000000000104001900000000020500190c0f072e0000040f000000000201043300000020011000390c0f05620000040f000000010200002900000315042001970000004003000039000000000203043300000040052000390000000000450435000000600420003900000002050000290000000000540435000000800420003900000003050000290000000000540435000000a0042000390000000000140435000000a00100003900000000001204350000032f0400004100000020012000390000000000410435000003300420009c000007960000813d000000c004200039000000000043043500000000020204330c0f05620000040f00000315011001970000000300000005000000000001042d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000400400003900000000030404330000006005300039000000000025043500000315011001970000004002300039000000000012043500000060010000390000000000130435000000200130003900000331020000410000000000210435000003320230009c000007b20000813d0000008002300039000000000024043500000000020304330c0f05620000040f0000031501100197000000000001042d0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000000223004b000007d20000813d000000050230021000000000021200190000000302200367000000000202043b00000000031000790000009f0330008a0000031304000041000000000532004b0000000005000019000000000504401900000313033001970000031306200197000000000736004b000000000400a019000000000336013f000003130330009c00000000030500190000000003046019000000000330004c000007da
0000613d0000000001120019000000000001042d0000031b010000410000000000100435000000320100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000000100001900000000020000190c0f05920000040f000000000110004c000007e00000613d000000000001042d0000004001000039000000000101043300000044021000390000033303000041000000000032043500000024021000390000001c0300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000064020000390c0f05920000040f000000000110004c000007f20000613d000000000001042d000000400100003900000000010104330000006402100039000003340300004100000000003204350000004402100039000003350300004100000000003204350000002402100039000000280300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000084020000390c0f05920000040f000000000110004c000008070000613d000000000001042d000000400100003900000000010104330000004402100039000003360300004100000000003204350000002402100039000000150300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000064020000390c0f05920000040f000000000110004c000008190000613d000000000001042d000000400100003900000000010104330000004402100039000003370300004100000000003204350000002402100039000000130300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000064020000390c0f05920000040f0007000000000002000200000004001d000100000003001d000700000002001d000400000001001d000000000110004c0000000001000019000000010100c0390c0f07dd0000040f00000007010000290000031501100197000500000001001d0000ffff0110008c000000000100001900000001010020390c0f07ef0000040f0000004001000039000600000001001d000000000301043300000338010000410000000000130435000000040230003900000000010004140000000504000029000000000042043500008002020000390000002404000039000300000003001d00000000050300190c0f053f0000040f000000000110004c000008b00000613d0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600320018f00000003050000290000000002530019000000000332004b00000000030000190000000103004039000003140420009c0000000004050019000008d20000213d0000000103300190000008d20000c13d000000060300002900000000002304350000001f0110008c000008ad0000a13d0000000001040433000000000110004c000000000100001900000001010060390c0f08040000040f00000006010000290000000005010433000003390100004100000000001504350000000402500039000000000100041400000005030000290000000000320435000080030200003900000024040000390000000003050019000300000005001d0c0f053f0000040f000000000110004c000008b00000613d0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600320018f00000003050000290000000002530019000000000332004b00000000030000190000000103004039000003140420009c0000000004050019000008d20000213d0000000103300190000008d20000c13d00000006030000290000000000230435000000200110008c000008ad0000413d0000000001040433000000000110004c000000000100001900000001010060390c0f08160000040f00000004010000290c0f09980000040f000000070100002900000004020000290c0f09fa0000040f0c0f06250000040f000000000201001900000020012000390000000000010435000000000002043500000007010000290c0f07050000040f0000000001000411000600000001001d0000000702000029000000010300002900000002040000290c0f0a510000040f00000305010000410000000002000414000003050320009c000000000102401900000006020000290000031505200197000000c00110021000000316011001c70000800d0200003900000004030000390000031704000041000000040600002900000005070000290c0f0bfb0000040f0000000101200190000008ad0000613d0000000700000005000000000001042d00000000010
0001900000000020000190c0f05920000040f000000040200036700000001040000310000001f0340018f000000060100002900000000010104330000000504400270000000000540004c000008c10000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b000008b90000413d000000000530004c000008d00000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f0008000000000002000200000005001d000100000004001d000300000003001d000800000002001d000500000001001d000000000110004c0000000001000019000000010100c0390c0f07dd0000040f00000008010000290000031501100197000600000001001d0000ffff0110008c000000000100001900000001010020390c0f07ef0000040f0000004001000039000700000001001d000000000301043300000338010000410000000000130435000000040230003900000000010004140000000604000029000000000042043500008002020000390000002404000039000400000003001d00000000050300190c0f053f0000040f000000000110004c000009660000613d0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600320018f00000004050000290000000002530019000000000332004b00000000030000190000000103004039000003140420009c0000000004050019000009880000213d0000000103300190000009880000c13d000000070300002900000000002304350000001f0110008c000009630000a13d0000000001040433000000000110004c000000000100001900000001010060390c0f08040000040f00000007010000290000000005010433000003390100004100000000001504350000000402500039000000000100041400000006030000290000000000320435000080030200003900000024040000390000000003050019000400000005001d0c0f053f0000040f000000000110004c000009660000613d0000000101000031000000200210008c000000200200003900000000020140190000001f02200039000000600320018f00000004050000290000000002530019000000000332004b00000000030000190000000103004039000003140420009c0000000004050019000009880000213d0000000103300190000009880000c13d00000007030000290000000000230435000000200110008c000009630000413d0000000001040433000000000110004c000000000100001900000001010060390c0f08160000040f00000005010000290c0f09980000040f000000080100002900000005020000290c0f09fa0000040f0c0f06250000040f00000000020100190000000303000029000000020130008c000009900000813d00000000003204350000002001200039000000000001043500000008010000290c0f07050000040f0000000001000411000700000001001d0000000802000029000000010300002900000002040000290c0f0a510000040f00000305010000410000000002000414000003050320009c000000000102401900000007020000290000031505200197000000c00110021000000316011001c70000800d0200003900000004030000390000031704000041000000050600002900000006070000290c0f0bfb0000040f0000000101200190000009630000613d0000000800000005000000000001042d000000000100001900000000020000190c0f05920000040f000000040200036700000001040000310000001f0340018f000000070100002900000000010104330000000504400270000000000540004c000009770000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b0000096f0000413d000000000530004c000009860000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031b010000410000000000100435000000410100003900000004020000
390000000000120435000000240200003900000000010000190c0f05920000040f0000031b010000410000000000100435000000210100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f00020000000000020000004002000039000200000002001d00000000030204330000033a02000041000000000023043500000004023000390000000000120435000000000100041400008004020000390000002404000039000100000003001d00000000050300190c0f053f0000040f0000000104000031000000000110004c000009c10000613d000000200140008c000000200100003900000000010440190000001f01100039000000600210018f00000001050000290000000001520019000000000221004b00000000020000190000000102004039000003140310009c0000000003050019000009e20000213d0000000102200190000009e20000c13d000000020200002900000000001204350000001f0240008c000009ea0000a13d0000000002030433000000000220004c000009ed0000613d0000000200000005000000000001042d00000004030003670000001f0240018f000000020100002900000000010104330000000504400270000000000540004c000009d10000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000009c90000413d000000000520004c000009e00000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000000100001900000000020000190c0f05920000040f00000044021000390000033b03000041000000000032043500000024021000390000001a0300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000064020000390c0f05920000040f0003000000000002000200000002001d000300000001001d0000032201000041000000000010043900008002010000390000000402000039000100000002001d00000000001204390c0f05790000040f000000000110004c00000a240000613d00000002010000290000033c011001970000033d011001c70000004002000039000200000002001d0000000003020433000000240230003900000000001204350000033e0100004100000000001304350000000301000029000003150110019700000004023000390000000000120435000000000100041400008002020000390000004404000039000300000003001d000000000503001900000000060000190c0f050a0000040f000000000110004c00000a270000613d00000003020000290000032e0120009c00000a490000813d000000020100002900000000002104350000000300000005000000000001042d000000000100001900000000020000190c0f05920000040f000000040200036700000001040000310000001f0340018f000000020100002900000000010104330000000504400270000000000540004c00000a380000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b00000a300000413d000000000530004c00000a470000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310c0f05920000040f0000031b010000410000000000100435000000410100003900000001020000290000000000120435000000240200003900000000010000190c0f05920000040f00060000000000020000000005030019000500000002001d000600000001001d0000000001000416000000000110004c00000a850000613d000300000005001d000400000004001d000003220100004100000000001004390000800a01000039000000040200003900000000001204390c0f05790000040f000000000110004c00000ba20000613d0000004001000039000100000001001d00000000050104330000033f0100004100000000001504350000004402500039000000000100041400000000030004160
000000000320435000000050200002900000315022001970000002403500039000000000023043500000000020004100000031502200197000000040350003900000000002304350000800a0200003900000064040000390000000003050019000200000005001d00000000060000190c0f050a0000040f000000000110004c00000bad0000613d0000000202000029000003140120009c0000000404000029000000030500002900000ba50000213d000000010100002900000000002104350000000001000416000003400110019700000000000104170000000003000031000000000105001900000000020400190c0f072e0000040f000400000001001d00000000010104330c0f0be90000040f000300000001001d00000000010004140c0f0be90000040f0000000402000029000000400220021000000341022000410000034202200197000000030300002900000060033002100000034303300197000000000232019f000000c0011002100000034401100197000000000112019f0000000602000029000003150d2001970000033d011001c700000005020000290c0f0c050000040f000000000302001900000000020100190000006002200270000103050020019d0000030502200197000400000001035500000001033001900000001f0320018f000000050420027000000bcf0000613d0000003f012000390000034506100197000000400c00003900000000050c04330000000001650019000000000751004b00000000070000190000000107004039000003140810009c00000ba50000213d000000010770019000000ba50000c13d00000000001c043500000000002504350000002001500039000000200860008a0000001f0680018f000000000700003100000003077003670000000508800270000000000980004c00000ac80000613d0000000009000019000000050a900210000000000ba10019000000000aa7034f000000000a0a043b0000000000ab04350000000109900039000000000a89004b00000ac00000413d000000000960004c00000ad70000613d0000000508800210000000000787034f00000000088100190000000306600210000000000908043300000000096901cf000000000969022f000000000707043b0000010006600089000000000767022f00000000066701cf000000000696019f000000000068043500000000002504350000000106000031000000000262004b00000ba20000213d0000000402000367000000000640004c00000ae70000613d000000000600001900000005076002100000000008710019000000000772034f000000000707043b00000000007804350000000106600039000000000746004b00000adf0000413d000000000630004c00000af60000613d0000000504400210000000000242034f00000000044100190000000303300210000000000604043300000000063601cf000000000636022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000262019f000000000024043500000000020504330000031303000041000000200420008c000000000400001900000000040340190000031305200197000000000650004c000000000300a019000003130550009c000000000304c019000000000330004c00000ba20000c13d0000000003010433000003140430009c00000ba20000213d000000000221001900000000011300190000001f031000390000031304000041000000000523004b0000000005000019000000000504801900000313033001970000031306200197000000000763004b0000000004008019000000000363013f000003130330009c00000000030500190000000003046019000000000330004c00000ba20000c13d0000000003010433000003140430009c00000ba50000213d00000005043002100000003f04400039000000200500008a000000000454016f00000000090c04330000000004490019000000000594004b00000000050000190000000105004039000003140640009c00000ba50000213d000000010550019000000ba50000c13d00000000004c04350000000000390435000000200110003900000006033002100000000003130019000000000423004b00000ba20000213d0000000004090019000000000531004b00000b4c0000813d00000000051200490000031306000041000000400750008c000000000700001900000000070640190000031305500197000000000850004c000000000600a019000003130550009c00000000050700190000000005066019000000000550004c00000ba20000c13d00000000050c04330000031a0650009c00000ba50000213d0000002004400039000000400650003900000000006c0435000000000601043300000000006504350000002006100039000000000606043300000020075000390000
0000006704350000000000540435000000400110003900000b2e0000013d0000032201000041000000000010043900008005010000390000000402000039000300000002001d000000000012043900060000000c001d000400000009001d0c0f05790000040f00000004070000290000000604000029000000000110004c00000ba20000613d000000000304043300000346010000410000000000130435000000240230003900000000010004140000000000420435000000050200002900000315042001970000000402300039000200000004001d000000000042043500000000020704330000004404300039000000000024043500000064083000390000000004000019000000000524004b00000b760000813d000000200770003900000000050704330000000006050433000000000068043500000020055000390000000005050433000000200680003900000000005604350000000104400039000000400880003900000b690000013d0000000004380049000080050200003900000000050300190000000006000019000500000003001d0c0f050a0000040f000000000110004c00000bad0000613d0000000502000029000003140120009c000000060100002900000ba50000213d0000000000210435000003220100004100000000001004390000800201000039000000030200002900000000001204390c0f05790000040f0000000602000029000000000110004c00000ba20000613d0000000005020433000003470100004100000000001504350000000402500039000000000100041400000002030000290000000000320435000080020200003900000024040000390000000003050019000500000005001d00000000060000190c0f050a0000040f000000000110004c00000bad0000613d0000000502000029000003140120009c000000060100002900000ba50000213d00000000002104350000000600000005000000000001042d000000000100001900000000020000190c0f05920000040f0000031b010000410000000000100435000000410100003900000004020000390000000000120435000000240200003900000000010000190c0f05920000040f000000040200036700000001040000310000001f0340018f000000400100003900000000010104330000000504400270000000000540004c00000bbe0000613d000000000500001900000005065002100000000007610019000000000662034f000000000606043b00000000006704350000000105500039000000000645004b00000bb60000413d000000000530004c00000bcd0000613d0000000504400210000000000242034f00000000044100190000000303300210000000000504043300000000053501cf000000000535022f000000000202043b0000010003300089000000000232022f00000000023201cf000000000252019f000000000024043500000001020000310c0f05920000040f000000000540004c00000bd90000613d00000000050000190000000506500210000000000761034f000000000707043b00000000007604350000000105500039000000000645004b00000bd20000413d000000000530004c00000be70000613d00000003033002100000000504400210000000000504043300000000053501cf000000000535022f000000000141034f000000000101043b0000010003300089000000000131022f00000000013101cf000000000151019f000000000014043500000000010000190c0f05920000040f000003480210009c00000bec0000813d000000000001042d000000400100003900000000010104330000004402100039000003490300004100000000003204350000002402100039000000080300003900000000003204350000031c02000041000000000021043500000004021000390000002003000039000000000032043500000064020000390c0f05920000040f00000bfe002104210000000102000039000000000001042d0000000002000019000000000001042d00000c03002104230000000102000039000000000001042d0000000002000019000000000001042d000000000f0d001900000c09002104290000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d00000c0f0000043200000c100001042e00000c11000104300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f3385fb6000000000000000000000000000000000000000000000000000000003cda3351000000000000000000000000000000000000000
0000000000000000057180981000000000000000000000000000000000000000000000000000000005d382700000000000000000000000000000000000000000000000000000000007b510fe80000000000000000000000000000000000000000000000000000000084da1fb4000000000000000000000000000000000000000000000000000000009c4d535b00000000000000000000000000000000000000000000000000000000bb0fd61000000000000000000000000000000000000000000000000000000000e9f18c1700000000000000000000000000000000000000000000000000000000ec8067c700000000000000000000000000000000000000000000000000000000ecf95b8a00000000000000000000000000000000000000000000000000000000187598a58000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0200000000000000000000000000000000000000000000000000000000000000290afdae231a3fc0bbae8b1af63698b0a1d79b21ad17df0342dfb952fe74f8e50000000000000000000000000000000000000000000000000000000000010000306395c600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffbf4e487b710000000000000000000000000000000000000000000000000000000008c379a00000000000000000000000000000000000000000000000000000000043616e206f6e6c792062652063616c6c656420627920464f5243455f4445504c4f5945525f434f4e5452414354000000000000000000000000000000000000006d656e74730000000000000000000000000000000000000000000000000000002074686520636f6d62696e6564206076616c75656073206f66206465706c6f796076616c7565602070726f7669646564206973206e6f7420657175616c20746f1806aa1896bbf26568e884a7374b41e002500962caba6a15023a8d90e8508b83f3385fb60000000000000000000000000000000000000000000000000000000002000002000000000000000000000000000000240000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffc04de2e46800000000000000000000000000000000000000000000000000000000666c61670000000000000000000000000000000000000000000000000000000054686973206d6574686f6420726571756972652073797374656d2063616c6c20696e6700000000000000000000000000000000000000000000000000000000006f6d2073657175656e7469616c20746f20617262697472617279206f726465724974206973206f6e6c7920706f737369626c6520746f206368616e6765206672ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff00000000000000000000000000000000000000000000000100000000000000002020dba91b30cc0006188af794c2fb30dd8520db7e2c088b7fc7c103c00ca494000000000000000000000000000000000000000000000000ffffffffffffff4063bae3a9951d38e8a3fbb7b70909afc1200610fc5bc55ade242f815974674f23000000000000000000000000000000000000000000000000ffffffffffffff8042797465636f6465486173682063616e206e6f74206265207a65726f00000000656c20737061636500000000000000000000000000000000000000000000000043616e206e6f74206465706c6f7920636f6e74726163747320696e206b65726e436f64652068617368206973206e6f6e2d7a65726f00000000000000000000004163636f756e74206973206f6363757069656400000000000000000000000000e03fe177000000000000000000000000000000000000000000000000000000005aa9b6b5000000000000000000000000000000000000000000000000000000004c6314f00000000000000000000000000000000000000000000000000000000054686520636f64652068617368206973206e6f74206b6e6f776e000000000000ff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00010000000000000000000000000000000000000000000000000000000000004f1e1be000000000000000000000000000000000000000000000000000000000579952fc0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff0000000000
0000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000ffffffff000000000000000000000000000000000000000000000000ffffffff00000000000000000000000000000000ffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ffffffe0ad7e232e00000000000000000000000000000000000000000000000000000000c2e4ff970000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000004f766572666c6f770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000001": "0x00010000000000020000000000010355000000800300003900000040010000390000000000310435000000000300041600000001022001900000003b0000c13d000000000230004c000000450000c13d00000000020004120000001a022001970000000003000410000000000232004b000000450000c13d00000000040003670000006002400370000000000302043b0000001b063000410000002002400370000000000202043b0000004005400370000000000505043b0000001b075000410000001c0770009c000000470000413d0000001d0720008a000000020800008a000000000787004b000000470000413d0000001b0660009c000000470000a13d000000000404043b000000000101043300000060061000390000000000360435000000400310003900000000005304350000001b0220008a000000200310003900000000002304350000000000410435000000800110008c0000004b0000c13d0000000001000414000004580110008c000000450000413d00000458010000390000001e020000410000000001120420000000000110004c000000450000613d0000000001000433000000010110008c000000470000c13d000000200100003900000000020100190000000003000019005e00520000040f000000000130004c000000450000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000001903000041005e00520000040f0000000001000019005e005c0000040f000000000100001900000000020000190000000003000019005e00520000040f0000001d0100004100000000001004350000000101000039000000040200003900000000001204350000002401000039005e005c0000040f0000001f040000410000001f0510009c0000000001048019000000400110021000000000013100190000001f0320009c0000000002048019000000600220021000000000012100190000005f0001042e000000600110021000000060000104300000005e000004320000005f0001042e00000060000104300000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000014551231950b75fc4402da1732fc9bebf000000000000000000000000000000014551231950b75fc4402da1732fc9bec04e487b7100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000040000000400000000000000000000000000000000000000000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008005": 
"0x00020000000000020007000000000002000100000001035500000060011002700000003a0010019d00000080050000390000004001000039000000000051043500000001012001900000003d0000c13d0000000001000031000000040110008c000000a50000413d0000000101000367000000000101043b000000e0011002700000003c0210009c000000480000613d0000003d0110009c000000a50000c13d0000000001000416000000000110004c000000a50000c13d000000040100008a00000000011000310000003e02000041000000400310008c000000000300001900000000030240190000003e01100197000000000410004c000000000200a0190000003e0110009c00000000010300190000000001026019000000000110004c000000a50000c13d000600000005001d00e400d20000040f0000003f0110019700000000001004350000002001000039000700000001001d0000000000010435000000000100001900e400ac0000040f00000024020000390000000102200367000000000202043b000000000020043500000007020000290000000000120435000000000100001900e400ac0000040f00e400e20000040f0000000602000029000000000012043500000000010200190000000702000029000000000300001900e400bf0000040f0000000001000416000000000110004c000000a50000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000003b0300004100e400bf0000040f0000000001000416000000000110004c000000a50000c13d0000000001000031000000040210008a0000003e03000041000000400420008c000000000400001900000000040340190000003e02200197000000000520004c000000000300a0190000003e0220009c00000000020400190000000002036019000000000220004c000000a50000c13d00000001020003670000000403200370000000000303043b000300000003001d0000003f0330009c000000a50000213d0000002403200370000000000303043b000000400430009c000000a50000213d00000023043000390000003e05000041000000000614004b000000000600001900000000060580190000003e071001970000003e04400197000000000874004b0000000005008019000000000474013f0000003e0440009c00000000040600190000000004056019000000000440004c000000a50000c13d0000000404300039000000000242034f000000000402043b000000400240009c000000a50000213d00000024033000390000000602400210000200000003001d0000000002320019000000000112004b000000a50000213d0000000001000411000080060110008c000000a50000c13d0000002001000039000700000001001d0000000002000019000100000004001d000000000142004b000000a80000813d0000000601200210000600000002001d000000020200002900000000012100190000000102000367000000000312034f0000002001100039000000000112034f000000000101043b000500000001001d000000000103043b000400000001001d0000000301000029000000000010043500000007010000290000000000010435000000000100001900e400ac0000040f0000000402000029000000000020043500000007020000290000000000120435000000000100001900e400ac0000040f0000000002010019000000050100002900e400e00000040f000000060200002900000001040000290000000102200039000000840000013d0000000001000019000000000200001900e400c90000040f00000000010000190000000002000019000000000300001900e400bf0000040f0000003a0200004100000000030004140000003a0430009c00000000030280190000003a0410009c00000000010280190000004001100210000000c002300210000000000112019f00000041011001c7000080100200003900e400db0000040f0000000102200190000000bc0000613d000000000101043b000000000001042d0000000001000019000000000200001900e400c90000040f0000003a040000410000003a0510009c0000000001048019000000400110021000000000013100190000003a0320009c000000000204801900000060022002100000000001210019000000e50001042e0000003a030000410000003a0420009c00000000020380190000003a0410009c000000000103801900000040011002100000006002200210000000000112019f000000e60001043000000004010000390000000101100367000000000101043b000000420210009c000000d80000813d000000000001042d0000000001000019000000000200001900e400c90000040f000000de002104230000000102000039000000000001042d
0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d000000e400000432000000e50001042e000000e600010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ad7e232e00000000000000000000000000000000000000000000000000000000310ab0898000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffffff02000000000000000000000000000000000000400000000000000000000000000000000000000000000000010000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008010": "0x0002000000000002000200000000000200010000000103550000006001100270000000280010019d00000080010000390000004004000039000000000014043500000000030004160000000102200190000000550000c13d000000000230004c0000005f0000c13d00000000020004120000002a022001970000000003000410000000000232004b0000005f0000c13d000000010500036700000000030000310000001f0630018f0000000004040433000000882730011a0000000507300270000000000870004c000000230000613d00000000080000190000000509800210000000000a940019000000000995034f000000000909043b00000000009a04350000000108800039000000000978004b0000001b0000413d000000000860004c000000320000613d0000000507700210000000000575034f00000000077400190000000306600210000000000807043300000000086801cf000000000868022f000000000505043b0000010006600089000000000565022f00000000056501cf000000000585019f0000000000570435000000800440008c000000620000c13d000000000323004900000088043000390000002c030000410000000001100031000000870220008c0000003e0000613d0000002d0200004100000000002104350000002e030000410000007f014000390000000000310435000000881240011a000100000002001d00000028212000c9000200000004001d009a00880000040f0000000102000029000000c00220021000000002030000290000001b033002100000002f033000410000003003300197000000000223019f00000031022001c7000000000301001900000000010200190000000002030019009a007f0000040f009a00790000040f000000200200003900000000010000190000000003000019009a006a0000040f000000000130004c0000005f0000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000002903000041009a006a0000040f00000000010000190000000002000019009a00720000040f0000002b01000041000000000010043500000001010000390000000402000039000000000012043500000024020000390000000001000019009a00720000040f0000002804000041000000280510009c000000000104801900000040011002100000006002200210000000000121019f00000000013100190000009b0001042e0000002803000041000000280410009c000000000103801900000040011002100000006002200210000000000121019f0000009c00010430000000000110004c0000007c0000613d000000000001042d00000000010000190000000002000019009a00720000040f00000028022001970000000003000414000000000323004b000000850000413d0000000001210420000000000001042d00000000010000190000000002000019009a00720000040f000000320210009c0000008b0000813d000000000001042d00000040010000390000000001010433000000440210003900000033030000410000000000320435000000240210003900000008030000390000000000320435000000340200004100000000002104350000000402100039000000200300003900000000003204350000006402000039009a00720000040f0000009a000004320000009b0001042e0000009c0001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff4e487b7100000
00000000000000000000000000000000000000000000000000081000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8000000000000000000000000000000000000000000000000000000ffffffff00000000000000000000000000000000000000000000000100000000000000000000000400000000000000000000000000000000000000000000000000000001000000004f766572666c6f7700000000000000000000000000000000000000000000000008c379a000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008004": "0x0002000000000002000100000000000200010000000103550000006001100270000000520010019d0000008005000039000000400100003900000000005104350000000101200190000000300000c13d0000000001000031000000040110008c000000710000413d0000000101000367000000000101043b000000e001100270000000540210009c0000003b0000613d000000550110009c000000710000c13d0000000001000416000000000110004c000000710000c13d000000040100008a00000000011000310000005602000041000000200310008c000000000300001900000000030240190000005601100197000000000410004c000000000200a019000000560110009c00000000010300190000000001026019000000000110004c000000710000c13d00000004010000390000000101100367000000000101043b000100000005001d014201400000040f00000001030000290000000000130435000000200200003900000000010300190000000003000019014200790000040f0000000001000416000000000110004c000000710000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000005303000041014200790000040f0000000001000416000000000110004c000000710000c13d0000000004000031000000040140008a0000005602000041000000400310008c000000000300001900000000030240190000005601100197000000000510004c000000000200a019000000560110009c00000000010300190000000001026019000000000110004c000000710000c13d00000001020003670000000401200370000000000101043b000000000310004c0000000003000019000000010300c039000000000331004b000000710000c13d0000002403200370000000000503043b000000570350009c000000710000213d00000023035000390000005606000041000000000743004b0000000007000019000000000706801900000056084001970000005603300197000000000983004b0000000006008019000000000383013f000000560330009c00000000030700190000000003066019000000000330004c000000710000c13d0000000403500039000000000232034f000000000302043b000000570230009c000000710000213d000000240250003900000005053002100000000005250019000000000445004b000000740000a13d00000000010000190000000002000019014200830000040f0142008c0000040f000000000100001900000000020000190000000003000019014200790000040f0000005204000041000000520510009c000000000104801900000040011002100000000001310019000000520320009c000000000204801900000060022002100000000001210019000001430001042e0000005203000041000000520420009c0000000002038019000000520410009c000000000103801900000040011002100000006002200210000000000112019f0000014400010430000a000000000002000700000003001d000600000002001d00000000030100190000000001000411000080010110008c0000012a0000c13d000000000130004c0000000001000019000000010100c039000500000001001d0000000101000039000400000001001d0000800d01000039000300000001001d0000000301000039000200000001001d0000000001000413000100000001001d0000000002000019000800000003001d0000000701000029000000000112004b000000df0000813d000a00000002001d0000000501200210000000060200002900000000012100190000000101100367000000000101043b000900000001001d014201400000040f0000000803000029000000000110004c000000dc0000c13d00000009020000290000005a012001970000005b0110009c000000e40000c13d0000005e01
200198000000f60000613d000000000130004c000000cb0000613d00000001010000290000005205100197000000db012002700000005f01100197000000640310003900000000413500a900000000433100d9000000000335004b000001040000c13d0000000003020019000000610210009c0000010c0000813d0000000002000414000000000212004b000000e10000413d0000000001100420000000000110004c0000011b0000613d0000000002030019000000000002041d00000004010000290142013e0000040f00000052010000410000000002000414000000520320009c0000000001024019000000c00110021000000064011001c700000065040000410000000302000029000000020300002900000009050000290000000506000029014201390000040f00000008030000290000000101200190000000e10000613d0000000a020000290000000102200039000000a10000013d0000000a00000005000000000001042d00000000010000190000000002000019014200830000040f0000004001000039000000000101043300000064021000390000005c03000041000000000032043500000044021000390000005d030000410000000000320435000000240210003900000022030000390000000000320435000000590200004100000000002104350000000402100039000000200300003900000000003204350000008402000039014200830000040f0000004001000039000000000101043300000044021000390000006603000041000000000032043500000059020000410000000000210435000000240210003900000020030000390000000000320435000000040210003900000000003204350000006402000039014200830000040f0000006001000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019014200830000040f00000040010000390000000001010433000000440210003900000063030000410000000000320435000000240210003900000008030000390000000000320435000000590200004100000000002104350000000402100039000000200300003900000000003204350000006402000039014200830000040f00000040010000390000000001010433000000440210003900000062030000410000000000320435000000240210003900000014030000390000000000320435000000590200004100000000002104350000000402100039000000200300003900000000003204350000006402000039014200830000040f0000004001000039000000000101043300000044021000390000005803000041000000000032043500000024021000390000001f030000390000000000320435000000590200004100000000002104350000000402100039000000200300003900000000003204350000006402000039014200830000040f0000013c002104210000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d0000014200000432000001430001042e000001440001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e516761e000000000000000000000000000000000000000000000000000000004c6314f08000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff43616c6c61626c65206f6e6c792062792074686520626f6f746c6f616465720008c379a000000000000000000000000000000000000000000000000000000000ffff00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000007368000000000000000000000000000000000000000000000000000000000000496e636f72726563746c7920666f726d61747465642062797465636f64654861000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001fffe04e487b710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000004661696c656420746f20636861726765206761730000000000000000000000004f766572666c6f7700000000000000000000000000000000000000000000000002000000000000000000000000000
00000000000000000000000000000000000c94722ff13eacf53547c4741dab5228353a05938ffcdd5d4a2d533ae0e618287436f6465206c656e67746820696e20776f726473206d757374206265206f6464", + "0x0000000000000000000000000000000000008008": "0x00020000000000020001000000000002000100000001035500000060011002700000004c0010019d00000080010000390000004008000039000000000018043500000001012001900000007a0000c13d0000000001000031000000040110008c000000850000413d0000000101000367000000000101043b0000004e011001970000004f0110009c000000850000c13d0000000001000416000000000110004c000000850000c13d0000000001000031000000040210008a0000005003000041000000200420008c000000000400001900000000040340190000005002200197000000000520004c000000000300a019000000500220009c00000000020400190000000002036019000000000220004c000000850000c13d00000001020003670000000403200370000000000303043b000000510430009c000000850000213d00000023043000390000005005000041000000000614004b0000000006000019000000000605801900000050011001970000005004400197000000000714004b0000000005008019000000000114013f000000500110009c00000000010600190000000001056019000000000110004c000000850000c13d0000000401300039000000000112034f000000000201043b000000520120009c000000880000813d0000001f01200039000000200400008a000000000141016f0000003f01100039000000000441016f00000000010804330000000004410019000000000514004b00000000050000190000000105004039000000510640009c000000880000213d0000000105500190000000880000c13d00000000004804350000000000210435000000240330003900000000043200190000000005000031000000000454004b000000850000213d000100000008001d0000001f0420018f000000010530036700000020031000390000000506200270000000000760004c000000610000613d000000000700001900000005087002100000000009830019000000000885034f000000000808043b00000000008904350000000107700039000000000867004b000000590000413d000000000740004c000000700000613d0000000506600210000000000565034f00000000066300190000000304400210000000000706043300000000074701cf000000000747022f000000000505043b0000010004400089000000000545022f00000000044501cf000000000474019f000000000046043500000000022300190000000000020435012a00ba0000040f000000010200002900000000030204330000000000130435000000200200003900000000010300190000000003000019012a00a70000040f0000000001000416000000000110004c000000850000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000004d03000041012a00a70000040f00000000010000190000000002000019012a00b10000040f0000005301000041000000000010043500000041010000390000000402000039000000000012043500000024020000390000000001000019012a00b10000040f0000004c030000410000004c0410009c000000000103801900000040011002100000004c0420009c00000000020380190000006002200210000000000112019f00000000020004140000004c0420009c0000000002038019000000c002200210000000000112019f00000054011001c70000801002000039012a01250000040f0000000102200190000000a40000613d000000000101043b000000000001042d00000000010000190000000002000019012a00b10000040f0000004c040000410000004c0510009c0000000001048019000000400110021000000000013100190000004c0320009c0000000002048019000000600220021000000000012100190000012b0001042e0000004c030000410000004c0420009c00000000020380190000004c0410009c000000000103801900000040011002100000006002200210000000000112019f0000012c000104300001000000000002000100000001001d00000000020104330000002001100039012a00900000040f000000010a00002900000000020a04330000005f03200039000000200200008a000000000323016f000000000901001900000000010004130000004c0410019700000000514300a9000000000530004c000000cf0000613d00000000533100d9000000000334004b000001090000c13d000000550310009c000001110000813d00000058011001970000000003
000414000000000313004b000001060000413d0000000001100420000000000110004c000001060000613d0000000005000411000000000095041d000000400100003900000000010104330000002003000039000000000031043500000000030a04330000002004100039000000000034043500000040041000390000000006000019000000000736004b000000e90000813d000000000746001900000020066000390000000008a6001900000000080804330000000000870435000000e10000013d000000000443001900000000000404350000005f03300039000000000223016f0000004c030000410000004c0410009c000000000103801900000040011002100000004c0420009c00000000020380190000006002200210000000000112019f00000000020004140000004c0420009c0000000002038019000000c002200210000000000112019f00000054011001c70000800d02000039000000030300003900000059040000410000000006090019000100000009001d012a01200000040f00000001010000290000000102200190000001060000613d0000000100000005000000000001042d00000000010000190000000002000019012a00b10000040f0000005301000041000000000010043500000011010000390000000402000039000000000012043500000024020000390000000001000019012a00b10000040f00000040010000390000000001010433000000440210003900000056030000410000000000320435000000240210003900000008030000390000000000320435000000570200004100000000002104350000000402100039000000200300003900000000003204350000006402000039012a00b10000040f00000123002104210000000102000039000000000001042d0000000002000019000000000001042d00000128002104230000000102000039000000000001042d0000000002000019000000000001042d0000012a000004320000012b0001042e0000012c0001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000000000000000000000000000000ffffffff0000000000000000000000000000000000000000000000000000000062f84b24000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff00000000000000000000000000000000000000000000000100000000000000004e487b7100000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000004f766572666c6f7700000000000000000000000000000000000000000000000008c379a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffe03a36e47291f4201faf137fab081d92295bce2d53be2c6ca68ba82c7faa9ce2410000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000800a": 
"0x0004000000000002000600000000000200000000030100190000006003300270000000b60430019700030000004103550002000000010355000000b60030019d000100000000001f0000008001000039000000400600003900000000001604350000000101200190000000440000c13d0000000003000031000000040130008c000002410000413d0000000201000367000000000201043b000000e002200270000000b90420009c000000e90000613d000000ba0420009c0000010c0000613d000000bb0420009c000001270000613d000000bc0420009c0000013e0000613d000000bd0420009c000000550000613d000000be0120009c0000018f0000613d000000bf0120009c000000c10000613d000000c00120009c000002410000c13d0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000000310004c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d000600000006001d02d102b20000040f00000000020100190000002001200039000000d103000041000000000031043500000006010000290000000001010433000600000001001d02d1029d0000040f000000060300002900000000023100490000000001030019000000000300001902d102770000040f0000000001000416000000000110004c000002410000c13d0000000201000039000600000001001d02d102cf0000040f000000b701100197000000060200002902d102cd0000040f000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000000b80300004102d102770000040f000000040230008a000000c103000041000000200420008c00000000040000190000000004034019000000c102200197000000000520004c000000000300a019000000c10220009c00000000020400190000000002036019000000000220004c000002410000c13d0000000401100370000000000201043b000000c20120009c000002410000213d0000000001000410000000c20110019700000000001004350000002001000039000300000001001d0000000000010435000600000006001d000500000002001d02d102680000040f000400000001001d02d102cf0000040f00000000020004160000000001210049000000040200002902d102cd0000040f0000000101000039000400000001001d02d102cf0000040f00000000020004160000000001210049000000040200002902d102cd0000040f000000060500002900000000020004160000000001050433000000c9030000410000002004100039000000000034043500000005030000290000006003300210000000240410003900000000003404350000003803100039000000000023043500000038020000390000000000210435000000ca0210009c000001ce0000813d0000006003100039000400000003001d0000000000350435000000cb02000041000000000023043500000064021000390000000003000414000200000003001d00000003030000290000000000320435000000840210003902d1028a0000040f000000040200002900000000032100490000000201000029000000000402001902d102440000040f0000000104000031000000000110004c000001c30000c13d00000003030003670000001f0240018f000000060100002900000000010104330000000504400270000000000540004c000000b00000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000000a80000413d000000000520004c000000bf0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f0000000000240435000000010200003102d102810000040f0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000000310004c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d0000000001060433000500000001001d000600000006001d02d102a40000040f00000005030000290000002001300039000000c30200004100000000002104350000000301000039000000000013043500000000010300190000000602000029
0000000003020433000600000003001d00000020020000390000000000230435000000200230003902d1028a0000040f000000060300002900000000023100490000000001030019000000000300001902d102770000040f0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000200310008c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d00000004010000390000000201100367000000000101043b000000c20110019700000000001004350000002001000039000500000001001d0000000000010435000600000006001d02d102680000040f02d102cf0000040f00000006020000290000000002020433000000000012043500000000010200190000000502000029000000000300001902d102770000040f0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000000310004c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d0000000101000039000600000006001d02d102cf0000040f00000006020000290000000003020433000000000013043500000020020000390000000001030019000000000300001902d102770000040f0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000000310004c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d0000000001060433000000120200003900000000002104350000002002000039000000000300001902d102770000040f0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000400310008c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d00000002010003670000000402100370000000000302043b000000c20230009c000002410000213d0000002401100370000000000201043b0000000001000411000080010110008c000001d60000c13d000400000003001d000600000006001d0000000101000039000500000002001d02d102cf0000040f000000000201001900000005010000290000000001120019000000000221004b000000000200001900000001020040390000000102200190000002210000c13d000000010200003902d102cd0000040f000000040100002900000000001004350000002001000039000000000001043502d102680000040f000300000001001d02d102cf0000040f000000050300002900000000020100190000000001320019000000000221004b000000000200001900000001020040390000000102200190000002210000c13d000000030200002902d102cd0000040f0000000601000029000000000101043300000005020000290000000000210435000000b6020000410000000003000414000000b60430009c0000000003028019000000b60410009c00000000010280190000004001100210000000c002300210000000000112019f000000c4011001c70000800d020000390000000203000039000000d004000041000000040500002902d102c30000040f0000000101200190000002410000613d000001fd0000013d0000000001000416000000000110004c000002410000c13d000000040100008a0000000001100031000000c102000041000000600310008c00000000030000190000000003024019000000c101100197000000000410004c000000000200a019000000c10110009c00000000010300190000000001026019000000000110004c000002410000c13d00000002010003670000000402100370000000000302043b000000c20230009c000002410000213d0000002402100370000000000402043b000000c20240009c000002410000213d00000000020004110000004401100370000000000501043b000080060120008c000002010000613d000000090100008a000000000112016f000080010110008c000002010000613d00000000010604330000006402100039000000c60300004100000000003204350000004402100039000000c703000041000000000032043500000024021000390000003e030000390000000000320435000
000c8020000410000000000210435000000040210003900000020030000390000000000320435000000840200003902d102810000040f000000200140008c000000200100003900000000010440190000001f01100039000000600110018f00000004020000290000000001210019000000cc0210009c00000006020000290000000506000029000001e40000a13d000000ce0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001902d102810000040f00000000010604330000004402100039000000cf03000041000000000032043500000024021000390000001f030000390000000000320435000000c8020000410000000000210435000000040210003900000020030000390000000000320435000000640200003902d102810000040f0000000000120435000000200140008c000002410000413d0000000001000416000000000016041d000000000102043300000000020004160000000000210435000000b6020000410000000003000414000000b60430009c0000000003028019000000b60410009c00000000010280190000004001100210000000c002300210000000000112019f000000c4011001c70000800d020000390000000303000039000000cd04000041000000000500041102d102c30000040f0000000101200190000002410000613d00000000010000190000000002000019000000000300001902d102770000040f000400000004001d000600000006001d000100000003001d00000000003004350000002001000039000300000001001d0000000000010435000500000005001d02d102680000040f000200000001001d02d102cf0000040f0000000503000029000000000231004b000002210000413d0000000001310049000000020200002902d102cd0000040f000000040100002900000000001004350000000301000029000000000001043502d102680000040f000300000001001d02d102cf0000040f000000050300002900000000020100190000000001320019000000000221004b000000000200001900000001020040390000000102200190000002290000613d000000ce0100004100000000001004350000001101000039000000040200003900000000001204350000002402000039000000000100001902d102810000040f000000030200002902d102cd0000040f0000000601000029000000000101043300000005020000290000000000210435000000b6020000410000000003000414000000b60430009c0000000003028019000000b60410009c00000000010280190000004001100210000000c002300210000000000112019f000000c4011001c70000800d020000390000000303000039000000c5040000410000000105000029000000040600002902d102c30000040f0000000101200190000001fd0000c13d0000000001000019000000000200001902d102810000040f0001000000000002000100000004001d000000b604000041000000b60520009c00000000020480190000004002200210000000b60530009c00000000030480190000006003300210000000000223019f000000b60310009c0000000001048019000000c001100210000000000112019f000080080200003902d102c30000040f0000000106000029000000010220018f000000000300001900000005043002100000000005460019000000000441034f000000000404043b00000000004504350000000103300039000000000430004c000000000400001900000001040060390000000104400190000002570000c13d00030000000103550000006001100270000100b60010019d00000000010200190000000100000005000000000001042d000000b6010000410000000002000414000000b60320009c0000000001024019000000c001100210000000d2011001c7000080100200003902d102c80000040f0000000102200190000002740000613d000000000101043b000000000001042d0000000001000019000000000200001902d102810000040f000000b604000041000000b60510009c000000000104801900000040011002100000000001310019000000b60320009c000000000204801900000060022002100000000001210019000002d20001042e000000b603000041000000b60420009c0000000002038019000000b60410009c000000000103801900000040011002100000006002200210000000000112019f000002d3000104300000000003010433000000000032043500000020022000390000000004000019000000000534004b000002960000813d000000000542001900000020044000390000000006140019000000000606043300000000006504350000028e0000013d000000000132001900000000000104350000001f01300039000000200300008a000000
000131016f0000000001120019000000000001042d0000000003020019000000200200003900000000002104350000002002100039000000000103001902d1028a0000040f000000000001042d000000d30210009c000002aa0000813d000000400110003900000040020000390000000000120435000000000001042d000000ce0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001902d102810000040f00000040020000390000000001020433000000d30310009c000002bb0000813d0000004003100039000000000032043500000005020000390000000000210435000000000001042d000000ce0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001902d102810000040f000002c6002104210000000102000039000000000001042d0000000002000019000000000001042d000002cb002104230000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d000002d100000432000002d20001042e000002d300010430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009cc7f7080000000000000000000000000000000000000000000000000000000018160ddd00000000000000000000000000000000000000000000000000000000313ce5670000000000000000000000000000000000000000000000000000000040c10f190000000000000000000000000000000000000000000000000000000051cff8d900000000000000000000000000000000000000000000000000000000579952fc0000000000000000000000000000000000000000000000000000000095d89b410000000000000000000000000000000000000000000000000000000006fdde038000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff45544800000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000020000000000000000000000000ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef616c206163636573732063616e2063616c6c2074686973206d6574686f6400004f6e6c792073797374656d20636f6e747261637473207769746820737065636908c379a0000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffa062f84b2400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff2717ead6b9200dd235aad468c9809ea400fe33ac69b5bfaa6d3e90fc922b63984e487b710000000000000000000000000000000000000000000000000000000043616c6c61626c65206f6e6c792062792074686520626f6f746c6f61646572000f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d412139688545746865720000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffc00000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008009": 
"0x0008000000000002000400000000000200000000060100190000006006600270000000740760019700070000007103550006000000010355000300000003001f000400000004001f000500000005001f000000740060019d000200000002001f000100000000001f00000080010000390000004007000039000000000017043500000001022001900000002c0000c13d000000020200003900000002032001870000000102300270000000000330004c00000000030004110000001b0000c13d000000760230009c00000000020000190000000102004039000000000220004c000000370000c13d0000007d0200004100000000002104350000002003000039000000840200003900000000003204350000002403000039000000a40400003900000000003404350000007e03000041000000c40400003900000000003404350000007f03000041000000e404000039000000000034043501cc00c90000040f0000000001000416000000000110004c000000660000c13d000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000000750300004101cc00bf0000040f0000000504000031000000770200004100000004052001870000000306000031000000000260004c000400000004001d000300000005001d0000004d0000c13d00000000000604170000000001000031000000000201001901cc00d20000040f00000000030100190000000401000029000000010410018f0000000002000411000000030100002901cc01120000040f00000000020104330000002001100039000000000300001901cc00bf0000040f0000007804000041000000a00200003900000000004204350000007703300197000000a4040000390000000000340435000000c4030000390000000000530435000000e403000039000100000006001d0000000000630435000000640300003900000000003104350000012003000039000200000007001d000000000037043500000000040004140000000003010433000000000104001901cc00ab0000040f0000000102000031000000000320004c000000690000c13d000000000110004c000000a20000c13d0000000001000019000000000200001901cc00c90000040f000000790320009c00000002070000290000009a0000813d0000003f03200039000000200400008a000000000443016f00000000030704330000000004430019000000000534004b000000000500001900000001050040390000007a0640009c0000009a0000213d00000001055001900000009a0000c13d000000000047043500000000002304350000002002300039000000070300036700000001050000310000001f0450018f0000000505500270000000000650004c0000008a0000613d000000000600001900000005076002100000000008720019000000000773034f000000000707043b00000000007804350000000106600039000000000756004b000000820000413d000000000640004c000000640000613d0000000505500210000000000353034f00000000025200190000000304400210000000000502043300000000054501cf000000000545022f000000000303043b0000010004400089000000000343022f00000000034301cf000000000353019f0000000000320435000000640000013d0000007b0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001901cc00c90000040f00000001060000290000007c0160009c000000040400002900000003050000290000003f0000413d00000000010000190000000002000019000000000300001901cc00bf0000040f0000007404000041000000740520009c00000000020480190000004002200210000000740530009c00000000030480190000006003300210000000000223019f000000740310009c0000000001048019000000c001100210000000000112019f0000800a0200003901cc01c10000040f00000000030100190000006003300270000100740030019d0007000000010355000000010120018f000000000001042d0000007404000041000000740510009c000000000104801900000040011002100000000001310019000000740320009c000000000204801900000060022002100000000001210019000001cd0001042e0000007403000041000000740420009c0000000002038019000000740410009c000000000103801900000040011002100000006002200210000000000112019f000001ce00010430000000790310009c000001070000813d0000003f03100039000000200400008a000000000543016f000000400400003900000000030404330000000005530019000000000635004b000000000600001900000001060040390000007a0750009c
000001070000213d0000000106600190000001070000c13d00000000005404350000000000130435000000000221004b0000010f0000213d00000006050003670000001f0410018f00000020023000390000000506100270000000000760004c000000f40000613d000000000700001900000005087002100000000009820019000000000885034f000000000808043b00000000008904350000000107700039000000000867004b000000ec0000413d000000000740004c000001030000613d0000000506600210000000000565034f00000000066200190000000304400210000000000706043300000000074701cf000000000747022f000000000505043b0000010004400089000000000545022f00000000044501cf000000000474019f0000000000460435000000000112001900000000000104350000000001030019000000000001042d0000007b0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001901cc00c90000040f0000000001000019000000000200001901cc00c90000040f0005000000000002000300000004001d000200000003001d000400000002001d000500000001001d000000000103043301cc01af0000040f000100000001001d000000000100041401cc01af0000040f0000000202000029000000400220021000000080022000410000008102200197000000010300002900000060033002100000008203300197000000000223019f000000c0011002100000008301100197000000000112019f00000084021001c70000000303000029000000000330004c000000000102c0190000000402000029000000770d200197000000050200002901cc01c60000040f000000000302001900000000020100190000006002200270000100740020019d000000740220019700070000000103550000000103300190000001880000613d0000003f012000390000008504100197000000400300003900000000010304330000000005410019000000000615004b000000000600001900000001060040390000007a0750009c000001a40000213d0000000106600190000001a40000c13d000000000053043500000000002104350000002003100039000000200640008a0000001f0460018f000000000500003100000006055003670000000506600270000000000760004c000001560000613d000000000700001900000005087002100000000009830019000000000885034f000000000808043b00000000008904350000000107700039000000000867004b0000014e0000413d000000000740004c000001650000613d0000000506600210000000000565034f00000000066300190000000304400210000000000706043300000000074701cf000000000747022f000000000505043b0000010004400089000000000545022f00000000044501cf000000000474019f000000000046043500000000002104350000000104000031000000000442004b000001ac0000213d00000007050003670000001f0420018f0000000502200270000000000620004c000001770000613d000000000600001900000005076002100000000008730019000000000775034f000000000707043b00000000007804350000000106600039000000000726004b0000016f0000413d000000000640004c000001860000613d0000000502200210000000000525034f00000000022300190000000303400210000000000402043300000000043401cf000000000434022f000000000505043b0000010003300089000000000535022f00000000033501cf000000000343019f00000000003204350000000500000005000000000001042d0000001f0420018f0000000503200270000000000530004c000001940000613d00000000050000190000000506500210000000000761034f000000000707043b00000000007604350000000105500039000000000635004b0000018d0000413d000000000540004c000001a20000613d00000003044002100000000503300210000000000503043300000000054501cf000000000545022f000000000131034f000000000101043b0000010004400089000000000141022f00000000014101cf000000000151019f0000000000130435000000000100001901cc00c90000040f0000007b0100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001901cc00c90000040f0000000001000019000000000200001901cc00c90000040f000000860210009c000001b20000813d000000000001042d000000400100003900000000010104330000004402100039000000870300004100000000003204350000002402100039000000080300003900000000003204350000007d020000410000000000210435000
000040210003900000020030000390000000000320435000000640200003901cc00c90000040f000001c4002104210000000102000039000000000001042d0000000002000019000000000001042d000000000f0d0019000001ca002104290000000102000039000000000001042d0000000002000019000000000001042d000001cc00000432000001cd0001042e000001ce00010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff579952fc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff4e487b7100000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000008c379a00000000000000000000000000000000000000000000000000000000054686973206d6574686f6420726571756972652073797374656d2063616c6c20666c61670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000ffffffff000000000000000000000000000000000000000000000000ffffffff00000000000000000000000000000000ffffffff000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ffffffe000000000000000000000000000000000000000000000000000000001000000004f766572666c6f770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000008003": "0x0005000000000002000500000000000200000000030100190000006003300270000000dc0430019700040000004103550003000000010355000000dc0030019d000200000002001f000100000000001f00000080010000390000004005000039000000000015043500000001012001900000007d0000c13d0000000001000031000000040110008c000002800000413d0000000301000367000000000101043b000000e001100270000000de0210009c000001630000613d000000df0210009c000000880000613d000000e00210009c000001860000613d000000e10210009c000000c30000613d000000e20210009c000001ba0000613d000000e30210009c000001dc0000613d000000e40210009c000000ed0000613d000000e50210009c000001080000613d000000e60210009c000001290000613d000000e70110009c000002800000c13d0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000400310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d000000010100003900000003020003670000002403200370000000000303043b000200000003001d0000000402200370000000000202043b000100000002001d0000000202000039000000020220018800000000020004110000004a0000c13d000000ea0120009c00000000010000190000000101004039000400000002001d036c03260000040f00000005010000290000000004010433000000f30100004100000000001404350000000401000029000000e90210019700000004034000390000000001000414000000000023043500000024030000390000000002040019000300000004001d036c02ba0000040f0000000104000031000000000110004c000002120000c13d00000004030003670000001f0240018f000000050100002900000000010104330000000504400270000000000540004c0000006c0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000000640000413d000000000520004c0000007b0000613d0000000504400210000000000343034f000000000441001900000003022002100000000005040433
00000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f00000000002404350000000102000031036c02f80000040f0000000001000416000000000110004c000002800000c13d000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000000dd03000041036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d00000004010000390000000301100367000000000201043b000000e90120009c000002800000213d000400000002001d000500000005001d000000010100003900000002020000390000000202200188000000a80000c13d0000000001000411000000ea0110009c00000000010000190000000101004039036c03260000040f0000000001000411000080060110008c000002580000c13d000000040100002900000000001004350000002001000039000200000001001d00000000000104350000000001000019036c02db0000040f036c036a0000040f000300000001001d0000000401000029036c03110000040f00000000020100190000000301000029000000f201100041036c03680000040f0000000301000029000000800210027000000005010000290000000001010433000000000021043500000002020000290000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d0000000001000411000000000010043500000001010000390000002002000039000400000002001d00000000001204350000000001000019000500000005001d036c02db0000040f00000004020000390000000302200367000000000202043b0000000000200435000000040200002900000000001204350000000001000019036c02db0000040f036c036a0000040f000000050200002900000000020204330000000000120435000000000102001900000004020000290000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d036c03010000040f036c031d0000040f000000050200002900000000030204330000000000130435000000200200003900000000010300190000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000400310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d036c03010000040f00000024020000390000000302200367000000000202043b036c033b0000040f00000005020000290000000003020433000000000110004c0000000001000019000000010100c0390000000000130435000000200200003900000000010300190000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d000000010100003900000002020000390000000202200188000001430000c13d0000000001000411000000ea0110009c00000000010000190000000101004039036c03260000040f0000000001000411000300000001001d00000000001004350000002001000039000400000001001d00000000000104350000000001000019036c02db0000040f036c036a0000040f00000004020000390000000302200367000000000202043b0000000003010019000
000eb01300197000000000121004b0000022e0000c13d00000003010000290000000000100435000000040100002900000000000104350000000001000019000500000003001d036c02db0000040f000000000201001900000005010000290000000101100039036c03680000040f000000000100001900000000020000190000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d036c03010000040f000000e90110019700000000001004350000002001000039000400000001001d00000000000104350000000001000019036c02db0000040f036c036a0000040f0000000502000029000000000202043300000080011002700000000000120435000000000102001900000004020000290000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d000000010100003900000004020000390000000302200367000000000202043b000400000002001d00000002020000390000000202200188000001a40000c13d0000000001000411000000ea0110009c00000000010000190000000101004039036c03260000040f0000000402000029000000ef0120009c0000023d0000413d000000050100002900000000010104330000006402100039000000f00300004100000000003204350000004402100039000000f1030000410000000000320435000000240210003900000030030000390000000000320435000000ed0200004100000000002104350000000402100039000000200300003900000000003204350000008402000039036c02f80000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000200310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d000500000005001d036c03010000040f000000e90110019700000000001004350000002001000039000400000001001d00000000000104350000000001000019036c02db0000040f036c036a0000040f000000050200002900000000020204330000000000120435000000000102001900000004020000290000000003000019036c02ee0000040f0000000001000416000000000110004c000002800000c13d000000040100008a0000000001100031000000e802000041000000600310008c00000000030000190000000003024019000000e801100197000000000410004c000000000200a019000000e80110009c00000000010300190000000001026019000000000110004c000002800000c13d00000003020003670000000401200370000000000101043b000000e90310009c000002800000213d0000004403200370000000000403043b000000000340004c0000000003000019000000010300c039000000000334004b000002800000c13d000400000004001d000500000005001d0000002402200370000000000202043b036c033b0000040f000000000110004c000002630000c13d0000000401000029000000000110004c0000000501000029000002710000613d00000000010104330000004402100039000000ee03000041000000000032043500000024021000390000001d030000390000000000320435000000ed0200004100000000002104350000000402100039000000200300003900000000003204350000006402000039036c02f80000040f000000400140008c000000400100003900000000010440190000001f01100039000000e00210018f00000003050000290000000001520019000000000221004b00000000020000190000000102004039000000f40310009c000002260000213d0000000102200190000002260000c13d00000005020000290000000000120435000000400240008c000002800000413d000000f50210009c000002750000a13d000000f801000041000000000010043500000041010000390000000402000039000000000012043500000024020000390000000001000019036c02f80000040f0000000501000029000000
00010104330000004402100039000000ec03000041000000000032043500000024021000390000000f030000390000000000320435000000ed0200004100000000002104350000000402100039000000040300002900000000003204350000006402000039036c02f80000040f0000000001000411000100000001001d00000000001004350000002001000039000200000001001d00000000000104350000000001000019036c02db0000040f036c036a0000040f000300000001001d0000000101000029036c03110000040f000000040200002900000003030000290000000002230019000000000301001900000000010200190000000002030019036c03680000040f0000000301000029000000eb0210019700000005010000290000000001010433000000000021043500000002020000290000000003000019036c02ee0000040f00000005010000290000000001010433000000ed020000410000000000210435000000040210003900000020030000390000000000320435000000240210003900000000000204350000004402000039036c02f80000040f0000000401000029000000000110004c0000000501000029000002710000c13d0000000002010433000500000002001d000000ed0100004100000000001204350000000401200039036c03590000040f000000050300002900000000023100490000000001030019036c02f80000040f000000000100001900000000020000190000000003000019036c02ee0000040f0000004002100039000000050400002900000000002404350000000002050433000000010320008c000002800000213d000000000021043500000020025000390000000002020433000000010320008c000002830000a13d00000000010000190000000002000019036c02f80000040f000000200110003900000000002104350000000201000029000000000110004c000002960000c13d00000000010404330000004402100039000000f703000041000000000032043500000024021000390000001f030000390000000000320435000000ed0200004100000000002104350000000402100039000000200300003900000000003204350000006402000039036c02f80000040f000000000120004c000002af0000c13d0000000101000029000000000110004c000002af0000613d0000000101000029000000010210008a0000000401000029036c033b0000040f000000000110004c000002af0000c13d000000050100002900000000010104330000004402100039000000f6030000410000000000320435000000ed020000410000000000210435000000240210003900000020030000390000000000320435000000040210003900000000003204350000006402000039036c02f80000040f0000000401000029036c030a0000040f0000000102000029036c03170000040f00000000020100190000000201000029036c03680000040f000000000100001900000000020000190000000003000019036c02ee0000040f0001000000000002000100000004001d000000dc04000041000000dc0520009c00000000020480190000004002200210000000dc0530009c00000000030480190000006003300210000000000223019f000000dc0310009c0000000001048019000000c001100210000000000112019f0000800602000039036c03630000040f0000000106000029000000010220018f000000000300001900000005043002100000000005460019000000000441034f000000000404043b00000000004504350000000103300039000000020430008c000002cd0000413d00040000000103550000006001100270000100dc0010019d00000000010200190000000100000005000000000001042d000000dc020000410000000003000414000000dc0430009c0000000003028019000000dc0410009c00000000010280190000004001100210000000c002300210000000000112019f000000f9011001c70000801002000039036c03630000040f0000000102200190000002eb0000613d000000000101043b000000000001042d00000000010000190000000002000019036c02f80000040f000000dc04000041000000dc0510009c000000000104801900000040011002100000000001310019000000dc0320009c0000000002048019000000600220021000000000012100190000036d0001042e000000dc03000041000000dc0420009c0000000002038019000000dc0410009c000000000103801900000040011002100000006002200210000000000112019f0000036e0001043000000004010000390000000301100367000000000101043b000000fa0210009c000003070000813d000000000001042d00000000010000190000000002000019036c02f80000040f000000000010043500000001010000390000002002000039000000000
01204350000000001000019036c02db0000040f000000000001042d0000000000100435000000200100003900000000000104350000000001000019036c02db0000040f000000000001042d0000000000200435000000200200003900000000001204350000000001000019036c02db0000040f000000000001042d000000e9011001970000000000100435000000200100003900000000000104350000000001000019036c02db0000040f036c036a0000040f000000eb01100197000000000001042d000000000110004c000003290000613d000000000001042d000000400100003900000000010104330000006402100039000000fb0300004100000000003204350000004402100039000000fc030000410000000000320435000000240210003900000024030000390000000000320435000000ed0200004100000000002104350000000402100039000000200300003900000000003204350000008402000039036c02f80000040f0002000000000002000200000002001d000100000001001d036c031d0000040f00000001020000390000000203000029000000000131004b000003560000213d0000000101000029000000e901100197000000000010043500000001010000390000002002000039000100000002001d00000000001204350000000001000019036c02db0000040f00000002020000290000000000200435000000010200002900000000001204350000000001000019036c02db0000040f036c036a0000040f000000000110004c0000000002000019000000010200c039000000010120018f0000000200000005000000000001042d0000004002100039000000fd03000041000000000032043500000020021000390000001c030000390000000000320435000000200200003900000000002104350000006001100039000000000001042d00000366002104230000000102000039000000000001042d0000000002000019000000000001042d000000000012041b000000000001042d000000000101041a000000000001042d0000036c000004320000036d0001042e0000036e00010430000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fb1a9a5700000000000000000000000000000000000000000000000000000000306395c60000000000000000000000000000000000000000000000000000000038a780920000000000000000000000000000000000000000000000000000000055d35d18000000000000000000000000000000000000000000000000000000005aa9b6b5000000000000000000000000000000000000000000000000000000006ee1dc2000000000000000000000000000000000000000000000000000000000896909dc00000000000000000000000000000000000000000000000000000000cab7e8eb00000000000000000000000000000000000000000000000000000000e1239cd800000000000000000000000000000000000000000000000000000000155fd27a8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000ffffffffffffffffffffffffffffffff496e636f7272656374206e6f6e6365000000000000000000000000000000000008c379a000000000000000000000000000000000000000000000000000000000546865206e6f6e636520776173206e6f7420736574206173207573656400000000000000000000000000000000000000000000000000000000000001000000016f6e636520697320746f6f2068696768000000000000000000000000000000005468652076616c756520666f7220696e6372656d656e74696e6720746865206e00000000000000000000000000000001000000000000000000000000000000007b510fe800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffffbf50726576696f7573206e6f6e636520686173206e6f74206265656e20757365644e6f6e63652076616c75652063616e206e6f742062652073657420746f2030004e487b71000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000040000000000000000000000000000000000000000000000001000000000000000000000000000000000000
0000666c61670000000000000000000000000000000000000000000000000000000054686973206d6574686f6420726571756972652073797374656d2063616c6c2052657573696e67207468652073616d65206e6f6e6365207477696365000000000000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000002": "0x00020000000000020002000000000002000100000001035500000060011002700000002b0010019d000000800300003900000040010000390000000000310435000000000300041600000001022001900000005e0000c13d000000000230004c000000680000c13d00000000020004120000002d022001970000000003000410000000000232004b000000680000c13d000000400200008a0000000004000031000000000224016f000000400320003900000080022000390000003f0540018f000000370550008c000000000a030019000000000a02201900000006053002700000000006000019000000010600203900000001030003670000001f0240018f00000000010104330000000504400270000000000740004c0000002d0000613d000000000700001900000005087002100000000009810019000000000883034f000000000808043b00000000008904350000000107700039000000000847004b000000250000413d0000000005650019000200000005001d000000000520004c0000003e0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000000021000310000002e0300004100000000003204350000000002a10019000000080220008a000000c30300003900000000033001ff0000000000320435000000800110008c0000000001000019000000010100603900010000000a001d00a600800000040f000000020100002900000007211000c900a600940000040f0000000202000029000000c00220021000000001030000290000001b033002100000002f03300197000000000223019f00000030022001c700000000030100190000000001020019000000000203001900a6008b0000040f00a6007a0000040f00000020020000390000000001000019000000000300001900a6006b0000040f000000000130004c000000680000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000002c0300004100a6006b0000040f0000000001000019000000000200001900a600730000040f0000002b040000410000002b0510009c000000000104801900000040011002100000006002200210000000000121019f0000000001310019000000a70001042e0000002b030000410000002b0410009c000000000103801900000040011002100000006002200210000000000121019f000000a800010430000000000110004c0000007d0000613d000000000001042d0000000001000019000000000200001900a600730000040f000000000110004c000000830000613d000000000001042d000000310100004100000000001004350000000101000039000000040200003900000000001204350000002402000039000000000100001900a600730000040f0000002b022001970000000003000414000000000323004b000000910000413d0000000001210420000000000001042d0000000001000019000000000200001900a600730000040f000000320210009c000000970000813d000000000001042d0000004001000039000000000101043300000044021000390000003303000041000000000032043500000024021000390000000803000039000000000032043500000034020000410000000000210435000000040210003900000020030000390000000000320435000000640200003900a600730000040f000000a600000432000000a70001042e000000a80001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000000000000000000000000000000000000000000010000000000000000000000044e487b710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000001000000004f766572666c6f7700000000000000000000000000000000000000000000000008c379a000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000800c": "0x00020000000000020016000000000002000100000001035500000060011002700000026a0010019d000000800c00003900000040060000390000000000c604350000000101200190000000830000c13d0000000001000031000000040110008c000006380000413d0000000101000367000000000101043b0000026c011001970000026d0110009c000006380000c13d0000000001000416000000000110004c000006380000c13d000000040100008a00000000011000310000026e02000041000000200310008c000000000300001900000000030240190000026e04100197000000000540004c000000000200a0190000026e0440009c000000000203c019000000000220004c000006380000c13d000000010d0003670000000402d00370000000000502043b0000026f0250009c000006380000213d00000000015100490000026e02000041000002600310008c000000000300001900000000030240190000026e01100197000000000410004c000000000200a0190000026e0110009c00000000010300190000000001026019000000000110004c000006380000c13d000000040350003900000000013d034f000000000101043b000000000210004c000001040b500039000000a40a500039000000640950003900000044085000390000012407500039000001c402500039000000c404500039000b00000004001d0000014404500039000d00000004001d001400000006001d001600000003001d001500000002001d000e00000005001d001200000007001d001100000008001d001000000009001d000f0000000a001d000c0000000b001d0000008e0000c13d0000000001bd034f000000000101043b00130000000d035309a108b10000040f0000000f03000029000000130200035f000000000232034f000000000202043b000a00000001001d000000000102001909a108b10000040f0000001003000029000000130200035f000000000232034f000000000202043b000900000001001d000000000102001909a108b10000040f0000000002010019000000090100002909a1078d0000040f0000001102000029000000130300035f000000000223034f000000000202043b0000027302200197000900000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b000700000001001d000000000102001909a108b10000040f000800000001001d0000001601000029000000150200002909a107120000040f0000026f01200197000000010210008c0000016a0000c13d0000001601000029000000150200002909a107120000040f000000000220004c000001c80000c13d000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f0000000001000416000000000110004c000006380000c13d0000002002000039000001000100003900000000002104390000012002000039000000000002043900000040020000390000026b0300004109a106f50000040f000000710410008c0000016e0000c13d000000000103001900130000000c001d09a107120000040f000000000300003109a1073f0000040f0000000002010433000000200110003909a106cf0000040f0000000e02000029000000140a000029000002040320003900000000040000310000000002240049000000230520008a0000000102000367000000000332034f000000000303043b0000026e06000041000000000753004b000000000700001900000000070680190000026e055001970000026e08300197000000000958004b0000000006008019000000000558013f0000026e0550009c00000000050700190000000005066019000000000b010019000000000150004c000006380000c13d00000016010000290000000001130019000000000312034f000000000503043b0000026f0350009c000006380000213d0000000503500210000000000434004900000020061000390000026e01000041000000000746004b000000000700001900000000070120190000026e044001970000026e08600197000000000948004b0000000001008019000000000448013f0000026e0440009c000000000107c019000000000110004c000006380000c13d00000000040a043300000020014000390000027605500197000000000750004c000000d50000613d000000000262034f000000000600001900000005076002100000000008710019000000000772034f000000000707043b000000
00007804350000000106600039000000000756004b000000cd0000413d000000000200004c000000d70000613d00000000003404350000003f02300039000000200300008a000000000232016f0000000002240019000000000342004b000000000300001900000001030040390000026f0520009c000003b10000213d0000000103300190000003b10000c13d000000140300002900000000002304350000000002040433000a0000000b001d09a106cf0000040f000900000001001d0000000e010000290000022402100039000000160100002909a107120000040f000000000300003109a1073f0000040f0000000002010433000000200110003909a106cf0000040f0000000e04000029000000e4024000390000002403400039000000840440003900000001050003670000001006000029000000000665034f0000001107000029000000000775034f000000000835034f0000001203000029000000000935034f0000000c03000029000000000a35034f000000000225034f0000000b03000029000000000b35034f0000000f03000029000000000c35034f000000000d45034f0000001603000029000000000335034f000000000303043b000000000408043b000000000507043b000000000606043b00000000070d043b00000000080c043b000000000b0b043b000000000c02043b000000000a0a043b000000000909043b00000014020000290000000002020433000001c00d20003900000000001d0435000001a001200039000000090d0000290000000000d1043500000180012000390000000a0d0000290000000000d104350000016001200039000000000091043500000140012000390000000000a1043500000120012000390000000000c1043500000100012000390000000000b10435000000e0012000390000000000810435000000c0012000390000000000710435000000a0012000390000000000610435000000800120003900000000005104350000006001200039000000000041043500000040012000390000000000310435000001c0010000390000000000120435000002770300004100000020012000390000000000310435000002780320009c000003b10000213d000001e00320003900000014040000290000000000340435000000000202043309a106cf0000040f00000014020000290000000002020433000a00000002001d00000272020000410000000000200439000900000001001d09a106e60000040f00000279020000410000000a04000029000000600340003900000000002304350000027a0200004100000040034000390000000000230435000000800240003900000000001204350000027b02000041000000200140003900000000002104350000001302000029000000000024043500000000030400190000027c0240009c000003b10000213d000000a00230003900000014040000290000000000240435000000000203043309a106cf0000040f000000140200002900000000040204330000004202400039000000090300002900000000003204350000027d020000410000002003400039000400000003001d000000000023043500000022024000390000000000120435000000420100003900000000001404350000027e0140009c000003b10000213d0000008001400039000003910000013d09a108e60000040f00000001050003670000000002010019000001de0000013d000000020410008c000002720000c13d0000027201000041000000000010043909a106e60000040f09a108b10000040f000000010200036700130000000203530000000c03000029000000000232034f000000000202043b000a00000001001d000000000102001909a108b10000040f0000000b02000029000000130300035f000000000223034f000000000202043b000900000001001d000000000102001909a108b10000040f0000000f03000029000000130200035f000000000232034f000000000202043b000800000001001d000000000102001909a108b10000040f0000001002000029000000130300035f000000000223034f000000000202043b000700000001001d000000000102001909a108b10000040f0000001102000029000000130300035f000000000223034f000000000202043b0000027302200197001300000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b000600000001001d000000000102001909a108b10000040f00000000070100190000000a010000290000000902000029000000080300002900000007040000290000001305000029000000060600002909a108110000040f000a00000001001d0000001601000029000000150200002909a107120000040f0000026f01200197000000010210008c000002c40000c13d0000001601000029000000150
200002909a107120000040f000000000220004c0000007b0000613d0000000101100367000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039001300000002001d000000000110004c000002c60000c13d09a107b40000040f001300000001001d000002c60000013d0000000105000367000000000115034f000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039000000000110004c000001de0000c13d001300000005035309a107b40000040f000000130500035f00000000020100190000000d01000029000000000115034f0000006004000039000000000101043b000000000110004c000500000002001d000002020000613d0000027201000041000000000010043909a106e60000040f09a108b10000040f00000014020000290000000002020433001300000002001d000000200220003909a107800000040f00000013040000290000027f02000041000000000021043500000000014100490000001e0210008a00000000002404350000002101100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000003b10000213d0000000102200190000003b10000c13d000000140200002900000000001204350000000502000029001300000004001d0000000001020433000600000001001d00000008010000290000000001010433000400000001001d00000007010000290000000001010433000300000001001d00000009010000290000000001010433000200000001001d0000000a010000290000000001010433000100000001001d0000001601000029000000150200002909a107120000040f00000002010000290000000103000029000000000131001900000003030000290000000001310019000000040300002900000000013100190000000603000029000000000131001900000000012100190000001302000029000000000202043300000000012100190000026f0110019709a1094a0000040f000400000001001d0000001601000029000000150200002909a107120000040f00000014030000290000000003030433000300000001001d000200000002001d000600000003001d00000020023000390000000401000029000400000002001d09a107800000040f00000000020100190000000a0100002909a107800000040f0000000002010019000000090100002909a107800000040f0000000002010019000000070100002909a107800000040f0000000002010019000000080100002909a107800000040f0000000002010019000000050100002909a107800000040f00000002080000290000001f0280018f000000030300002900000001033003670000000504800270000000000540004c0000024f0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000002470000413d000000000520004c0000025e0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000000028100190000000000020435000000130100002909a107800000040f00000006040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000003b10000213d0000000102200190000003910000613d000003b10000013d000000010110008c000003240000c13d0000027201000041000000000010043909a106e60000040f09a108b10000040f000000010200036700130000000203530000000c03000029000000000232034f000000000202043b000a00000001001d000000000102001909a108b10000040f0000000f03000029000000130200035f000000000232034f000000000202043b000900000001001d000000000102001909a108b10000040f0000001002000029000000130300035f000000000223034f000000000202043b000800000001001d000000000102001909a108b10000
040f0000001102000029000000130300035f000000000223034f000000000202043b0000027302200197001300000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b000700000001001d000000000102001909a108b10000040f00000000060100190000000a01000029000000090200002900000008030000290000001304000029000000070500002909a107da0000040f000a00000001001d0000001601000029000000150200002909a107120000040f0000026f01200197000000010210008c000003320000c13d0000001601000029000000150200002909a107120000040f000000000220004c0000007b0000613d0000000101100367000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039001300000002001d000000000110004c000003340000c13d09a107b40000040f001300000001001d000003340000013d09a108e60000040f001300000001001d09a109380000040f000900000001001d0000000a010000290000000001010433000800000001001d0000001301000029001300000001001d0000000001010433000700000001001d0000001601000029000000150200002909a107120000040f00000008010000290000000703000029000000000113001900000000012100190000000902000029000000000202043300000000012100190000026f0110019709a1094a0000040f000500000001001d0000001601000029000000150200002909a107120000040f0000001403000029000000000403043300000275030000410000002005400039000400000005001d0000000000350435000800000001001d000700000002001d000600000004001d0000002102400039000000050100002909a107800000040f00000000020100190000000a0100002909a107800000040f0000000002010019000000130100002909a107800000040f00000007080000290000001f0280018f000000080300002900000001033003670000000504800270000000000540004c000003010000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000002f90000413d000000000520004c000003100000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000000028100190000000000020435000000090100002909a107800000040f00000006040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000003b10000213d0000000102200190000003910000613d000003b10000013d000000000106043300000044021000390000027003000041000000000032043500000024021000390000001703000039000000000032043500000271020000410000000000210435000000040210003900000020030000390000000000320435000000640200003909a106ff0000040f09a108e60000040f001300000001001d09a109260000040f000900000001001d0000000a010000290000000001010433000800000001001d0000001301000029001300000001001d0000000001010433000700000001001d0000001601000029000000150200002909a107120000040f00000008010000290000000703000029000000000113001900000000012100190000000902000029000000000202043300000000012100190000026f0110019709a1094a0000040f000500000001001d0000001601000029000000150200002909a107120000040f0000001403000029000000000403043300000274030000410000002005400039000400000005001d0000000000350435000800000001001d000700000002001d000600000004001d0000002102400039000000050100002909a107800000040f00000000020100190000000a0100002909a107800000040f0000000002010019000000130100002909a107800000040f00000007080000290000001f0280018f000000080300002900000001033003670000000504800270000000000540004c0000036f0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b000000000067043
50000000105500039000000000645004b000003670000413d000000000520004c0000037e0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000000028100190000000000020435000000090100002909a107800000040f00000006040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000003b10000213d0000000102200190000003b10000c13d000000140200002900000000001204350000000002040433000000040100002909a106cf0000040f00000001050003670000001603000029000000000235034f0000000004010019000000000102043b000000710210008c001300000004001d000003b90000c13d0000000e01000029000001e402100039000000000103001909a107120000040f000000000300003109a1073f0000040f0000000002010433000000200110003909a106cf0000040f00000013030000290000001405000029000000000405043300000040024000390000000000120435000000000054043500000020014000390000000000310435000002820240009c000004000000a13d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000000000210004c0000040e0000c13d0000000c01000029000000000115034f000000000101043b000c00000005035309a108b10000040f0000000f020000290000000c0300035f000000000223034f000000000202043b000f00000001001d000000000102001909a108b10000040f00000010020000290000000c0300035f000000000223034f000000000202043b001000000001001d000000000102001909a108b10000040f0000000002010019000000100100002909a1078d0000040f00000011020000290000000c0300035f000000000223034f000000000202043b0000027302200197001100000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b000c00000001001d000000000102001909a108b10000040f001000000001001d0000001601000029000000150200002909a107120000040f0000026f01200197000000010210008c000004680000c13d0000001601000029000000150200002909a107120000040f000000000220004c0000007b0000613d0000000101100367000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039000b00000002001d000000000110004c0000046a0000c13d09a107b40000040f000b00000001001d0000046a0000013d000000600540003900000014020000290000000000520435000000000204043309a106cf0000040f0000001402000029000000000402043300000020034000390000001305000029000000000053043500000000001404350000000001040019000000000300001909a106f50000040f000000020210008c000005270000c13d0000027201000041000000000010043909a106e60000040f09a108b10000040f0000000103000367000d0000000303530000000c02000029000000000223034f000000000202043b000c00000001001d000000000102001909a108b10000040f0000000b020000290000000d0300035f000000000223034f000000000202043b000b00000001001d000000000102001909a108b10000040f0000000f020000290000000d0300035f000000000223034f000000000202043b000f00000001001d000000000102001909a108b10000040f00000010020000290000000d0300035f000000000223034f000000000202043b001000000001001d000000000102001909a108b10000040f00000011020000290000000d0300035f000000000223034f000000000202043b0000027302200197001100000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b001200000001001d000000000102001909a108b10000040f00000000070100190000000c010000290000000b020000290000000f0300002900000010040000290000001105000029000000120600002909a108110000040f001100000001001d0000001601000029000000150200002909
a107120000040f0000026f01200197000000010210008c000005790000c13d0000001601000029000000150200002909a107120000040f000000000220004c0000007b0000613d0000000101100367000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039001000000002001d000000000110004c0000057b0000c13d09a107b40000040f001000000001001d0000057b0000013d09a108e60000040f000b00000001001d0000000e01000029000001e4021000390000001601000029001200000002001d09a107120000040f000000200220008c000006380000413d0000000101100367000000000101043b09a108b10000040f000e00000001001d0000001601000029000000120200002909a107120000040f000000400220008c000006380000413d00000020011000390000000101100367000000000101043b09a108b10000040f000a00000001001d0000001601000029000000120200002909a107120000040f000000410220008c0000007b0000413d00000040011000390000000101100367000000000101043b000000f801100270001200000001001d0000001b0110008a000000020110008c0000000001000019000000010100403909a107c80000040f0000000d010000290000000101100367000000000101043b000000000110004c0000001201000029000004a50000613d0000027201000041000000000010043909a106e60000040f0000000102100210000000000310004c000004a00000613d000000090300008a000000000332004b000006530000213d00000000311200d9000000020110008c000006530000c13d000000120300002900000000012300190000000801100039000000000231004b000006530000413d09a108b10000040f001200000001001d0000000b01000029000b00000001001d0000000001010433000d00000001001d00000010010000290000000001010433000900000001001d0000000c010000290000000001010433000800000001001d0000000f010000290000000001010433000700000001001d00000011010000290000000001010433000600000001001d0000001601000029000000150200002909a107120000040f00000007010000290000000603000029000000000113001900000008030000290000000001310019000000090300002900000000013100190000000d03000029000000000131001900000000012100190000000e02000029000000000202043300000000012100190000000a02000029000000000202043300000000012100190000001202000029000000000202043300000000012100190000026f0110019709a1094a0000040f000800000001001d0000001601000029000000150200002909a107120000040f00000014030000290000000003030433000d00000001001d000900000002001d001600000003001d00000020023000390000000801000029001500000002001d09a107800000040f00000000020100190000000f0100002909a107800000040f0000000002010019000000110100002909a107800000040f00000000020100190000000c0100002909a107800000040f0000000002010019000000100100002909a107800000040f00000000020100190000000b0100002909a107800000040f00000009040000290000001f0240018f0000000d03000029000000010330036700000000080400190000000504400270000000000540004c000004fc0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000004f40000413d000000000520004c0000050b0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f000000000024043500000000028100190000000000020435000000120100002909a107800000040f00000000020100190000000e0100002909a107800000040f00000000020100190000000a0100002909a107800000040f00000016040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000005420019000000000225004b000000000200001900000001020040390000026f0350009c00000013030000290000001501000029000003b10000213d0000000102200190000004010000613d000003b10000013d000000010110008c00000
6190000c13d0000027201000041000000000010043909a106e60000040f09a108b10000040f0000000103000367000d0000000303530000000c02000029000000000223034f000000000202043b000c00000001001d000000000102001909a108b10000040f0000000f020000290000000d0300035f000000000223034f000000000202043b000f00000001001d000000000102001909a108b10000040f00000010020000290000000d0300035f000000000223034f000000000202043b001000000001001d000000000102001909a108b10000040f00000011020000290000000d0300035f000000000223034f000000000202043b0000027302200197001100000001001d000000000102001909a108990000040f00000012020000290000000102200367000000000202043b001200000001001d000000000102001909a108b10000040f00000000060100190000000c010000290000000f0200002900000010030000290000001104000029000000120500002909a107da0000040f001100000001001d0000001601000029000000150200002909a107120000040f0000026f01200197000000010210008c000006240000c13d0000001601000029000000150200002909a107120000040f000000000220004c0000007b0000613d0000000101100367000000000101043b000000010200008a0000026e03000041000000000221004b000000000200001900000000020320190000026e011001970000026e0410009c00000000030080190000026e011001670000026e0110009c000000000102001900000000010360190000006002000039001000000002001d000000000110004c000006260000c13d09a107b40000040f001000000001001d000006260000013d09a108e60000040f001000000001001d09a109380000040f000f00000001001d0000000e01000029000001e4021000390000001601000029001200000002001d09a107120000040f000000200220008c000006380000413d0000000101100367000000000101043b09a108b10000040f000e00000001001d0000001601000029000000120200002909a107120000040f000000400220008c000006380000413d00000020011000390000000101100367000000000101043b09a108b10000040f000d00000001001d0000001601000029000000120200002909a107120000040f000000410220008c0000007b0000413d00000040011000390000000101100367000000000101043b001200000001001d000000f8011002700000001b0110008a000c00000001001d000000020110008c0000000001000019000000010100403909a107c80000040f0000001201000029000002800110009c000006530000413d0000000c0100002909a108b10000040f001200000001001d00000011010000290000000001010433000c00000001001d0000001001000029001000000001001d0000000001010433000b00000001001d0000001601000029000000150200002909a107120000040f0000000c010000290000000b03000029000000000113001900000000012100190000000f02000029000000000202043300000000012100190000000e02000029000000000202043300000000012100190000000d02000029000000000202043300000000012100190000001202000029000000000202043300000000012100190000026f0110019709a1094a0000040f000a00000001001d0000001601000029000000150200002909a107120000040f0000001403000029000000000403043300000275030000410000002005400039001500000005001d0000000000350435000c00000001001d000b00000002001d001600000004001d00000021024000390000000a0100002909a107800000040f0000000002010019000000110100002909a107800000040f0000000002010019000000100100002909a107800000040f0000000b040000290000001f0240018f0000000c03000029000000010330036700000000080400190000000504400270000000000540004c000005eb0000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000005e30000413d000000000520004c000005fa0000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f0000000000240435000000000281001900000000000204350000000f0100002909a107800000040f0000000002010019000000120100002909a107800000040f00000000020100190000000e0100002909a107800000040f00000000020100190000000d0100002909a10780
0000040f00000016040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000005420019000000000225004b000000000200001900000001020040390000026f0350009c00000013030000290000001501000029000003b10000213d0000000102200190000004010000613d000003b10000013d00000014010000290000000002010433001600000002001d00000271010000410000000000120435000000040120003909a107080000040f00000016030000290000000002310049000000000103001909a106ff0000040f09a108e60000040f001000000001001d09a109260000040f000f00000001001d0000000e01000029000001e4021000390000001601000029001200000002001d09a107120000040f000000200220008c000006380000413d0000000101100367000000000101043b09a108b10000040f000e00000001001d0000001601000029000000120200002909a107120000040f000000400220008c0000063b0000813d0000000001000019000000000200001909a106ff0000040f00000020011000390000000101100367000000000101043b09a108b10000040f000d00000001001d0000001601000029000000120200002909a107120000040f000000410220008c0000007b0000413d00000040011000390000000101100367000000000101043b001200000001001d000000f8011002700000001b0110008a000c00000001001d000000020110008c0000000001000019000000010100403909a107c80000040f0000001201000029000002800110009c0000065b0000813d000002810100004100000000001004350000001101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f0000000c0100002909a108b10000040f001200000001001d00000011010000290000000001010433000c00000001001d0000001001000029001000000001001d0000000001010433000b00000001001d0000001601000029000000150200002909a107120000040f0000000c010000290000000b03000029000000000113001900000000012100190000000f02000029000000000202043300000000012100190000000e02000029000000000202043300000000012100190000000d02000029000000000202043300000000012100190000001202000029000000000202043300000000012100190000026f0110019709a1094a0000040f000a00000001001d0000001601000029000000150200002909a107120000040f0000001403000029000000000403043300000274030000410000002005400039001500000005001d0000000000350435000c00000001001d000b00000002001d001600000004001d00000021024000390000000a0100002909a107800000040f0000000002010019000000110100002909a107800000040f0000000002010019000000100100002909a107800000040f0000000b040000290000001f0240018f0000000c03000029000000010330036700000000080400190000000504400270000000000540004c000006a10000613d000000000500001900000005065002100000000007610019000000000663034f000000000606043b00000000006704350000000105500039000000000645004b000006990000413d000000000520004c000006b00000613d0000000504400210000000000343034f00000000044100190000000302200210000000000504043300000000052501cf000000000525022f000000000303043b0000010002200089000000000323022f00000000022301cf000000000252019f0000000000240435000000000281001900000000000204350000000f0100002909a107800000040f0000000002010019000000120100002909a107800000040f00000000020100190000000e0100002909a107800000040f00000000020100190000000d0100002909a107800000040f00000016040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000005420019000000000225004b000000000200001900000001020040390000026f0350009c00000013030000290000001501000029000003b10000213d0000000102200190000004010000613d000003b10000013d0000026a030000410000026a0410009c000000000103801900000040011002100000026a0420009c00000000020380190000006002200210000000000112019f00000000020004140000026a0420009c0000000002038019000000c002200210000000000112019f00000275011001c7000080100200003909a1099c0000040f0000000102200190000006e30000613d000000000101043b000000000001042d000000000100001900000000020
0001909a106ff0000040f0000026a0100004100000000020004140000026a0320009c0000000001024019000000c00110021000000283011001c70000800b0200003909a1099c0000040f0000000102200190000006f20000613d000000000101043b000000000001042d0000000001000019000000000200001909a106ff0000040f0000026a040000410000026a0510009c0000000001048019000000400110021000000000013100190000026a0320009c000000000204801900000060022002100000000001210019000009a20001042e0000026a030000410000026a0420009c00000000020380190000026a0410009c000000000103801900000040011002100000006002200210000000000112019f000009a300010430000000400210003900000284030000410000000000320435000000200210003900000013030000390000000000320435000000200200003900000000002104350000006001100039000000000001042d000000000300003100000000041300490000001f0540008a0000000104000367000000000224034f000000000202043b0000026e06000041000000000752004b000000000700001900000000070640190000026e055001970000026e08200197000000000958004b000000000600a019000000000558013f0000026e0550009c00000000050700190000000005066019000000000550004c0000073c0000613d0000000001120019000000000214034f000000000202043b0000026f0420009c0000073c0000213d000000000323004900000020011000390000026e04000041000000000531004b000000000500001900000000050420190000026e033001970000026e06100197000000000736004b0000000004008019000000000336013f0000026e0330009c00000000030500190000000003046019000000000330004c0000073c0000c13d000000000001042d0000000001000019000000000200001909a106ff0000040f0000000004010019000002850120009c000007750000813d0000003f01200039000000200500008a000000000651016f000000400500003900000000010504330000000006610019000000000716004b000000000700001900000001070040390000026f0860009c000007750000213d0000000107700190000007750000c13d000000000065043500000000002104350000000005420019000000000335004b0000077d0000213d0000001f0520018f000000010440036700000020031000390000000506200270000000000760004c000007630000613d000000000700001900000005087002100000000009830019000000000884034f000000000808043b00000000008904350000000107700039000000000867004b0000075b0000413d000000000750004c000007720000613d0000000506600210000000000464034f00000000066300190000000305500210000000000706043300000000075701cf000000000757022f000000000404043b0000010005500089000000000454022f00000000045401cf000000000474019f000000000046043500000000022300190000000000020435000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f0000000001000019000000000200001909a106ff0000040f00000000030104330000000004000019000000000534004b0000078a0000813d00000000052400190000002004400039000000000614001900000000060604330000000000650435000007820000013d00000000012300190000000000010435000000000001042d0003000000000002000300000002001d0000004002000039000100000002001d0000000002020433000200000002001d000000200220003909a107800000040f0000000002010019000000030100002909a107800000040f00000002040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000007ac0000213d0000000102200190000007ac0000c13d0000000102000029000000000012043500000000010400190000000300000005000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f00000040020000390000000001020433000002860310009c000007c00000813d0000004003100039000000000032043500000020021000390000028703000041000000000032043500000001020000390000000000210435000000000001042d0000028101000041000000000010043500000041010000
39000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000000000110004c000007cb0000613d000000000001042d0000004001000039000000000101043300000044021000390000028803000041000000000032043500000024021000390000000f03000039000000000032043500000271020000410000000000210435000000040210003900000020030000390000000000320435000000640200003909a106ff0000040f0007000000000002000700000006001d000600000005001d000400000004001d000300000003001d000200000002001d0000004002000039000100000002001d0000000002020433000500000002001d000000200220003909a107800000040f0000000002010019000000020100002909a107800000040f0000000002010019000000030100002909a107800000040f0000000002010019000000040100002909a107800000040f0000000002010019000000060100002909a107800000040f0000000002010019000000070100002909a107800000040f00000005040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000008090000213d0000000102200190000008090000c13d0000000102000029000000000012043500000000010400190000000700000005000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f0008000000000002000800000007001d000700000006001d000500000005001d000400000004001d000300000003001d000200000002001d0000004002000039000100000002001d0000000002020433000600000002001d000000200220003909a107800000040f0000000002010019000000020100002909a107800000040f0000000002010019000000030100002909a107800000040f0000000002010019000000040100002909a107800000040f0000000002010019000000050100002909a107800000040f0000000002010019000000070100002909a107800000040f0000000002010019000000080100002909a107800000040f00000006040000290000000001410049000000200210008a00000000002404350000001f01100039000000200200008a000000000221016f0000000001420019000000000221004b000000000200001900000001020040390000026f0310009c000008440000213d0000000102200190000008440000c13d0000000102000029000000000012043500000000010400190000000800000005000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f00000040020000390000000001020433000002860310009c000008640000813d0000004003100039000000000032043500000001020000390000000000210435000000200210003900000000030000310000000103300367000000000400001900000005054002100000000006520019000000000553034f000000000505043b00000000005604350000000104400039000000000540004c000000000500001900000001050060390000000105500190000008580000c13d000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f0000000002010019000002850120009c000008910000813d0000003f01200039000000200300008a000000000431016f000000400300003900000000010304330000000004410019000000000514004b000000000500001900000001050040390000026f0640009c000008910000213d0000000105500190000008910000c13d000000000043043500000000002104350000001f022000390000000502200270000000000320004c0000088e0000613d000000200310003900000000040000310000000104400367000000000500001900000005065002100000000007630019000000000664034f000000000606043b00000000006704350000000105500039000000000625004b000008860000413d000000000200004c000008900000613d000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000000000201001900000040030000390000000001030433000002860410009c000008a90000813d0000004004100039000000000043043500000020031000390
000028904000041000000000043043500000060022002100000002103100039000000000023043500000015020000390000000000210435000000000001042d000002810100004100000000001004350000004101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f00020000000000020000007f0210008c000200000001001d000008ce0000a13d09a109800000040f000100000001001d000000020110003909a1086c0000040f0000000002010433000000000220004c000008de0000613d000000200210003900000000030204330000028a033001970000000105000029000000f804500210000000000334019f000002870330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c0000000002030019000000000200201900000021031000390000000000230435000008dc0000013d09a1084c0000040f0000000002010433000000000220004c000008de0000613d0000000204000029000000f8024002100000026e03000041000000000440004c0000000002036019000000200310003900000000040304330000028a04400197000000000224019f00000000002304350000000200000005000000000001042d000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000200000000000200000000030100190000026f01300197000000010210008c000009160000613d000000370210008c000009070000a13d000200000001001d09a109800000040f000100000001001d000000020110003909a1086c0000040f0000000002010433000000000220004c0000091e0000613d000000200210003900000000030204330000028a033001970000000105000029000000f804500210000000000334019f0000028b0330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c0000000002030019000000000200201900000021031000390000000000230435000009140000013d000200000003001d09a1084c0000040f0000000002010433000000000220004c0000091e0000613d0000000202000029000000f802200210000000200310003900000000040304330000028a04400197000000000224019f0000026e0220016700000000002304350000000200000005000000000001042d000002810100004100000000001004350000000101000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f09a1084c0000040f0000000002010433000000000220004c000009300000613d000000200210003900000000030204330000028a033001970000028c033001c70000000000320435000000000001042d000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f09a1084c0000040f0000000002010433000000000220004c000009420000613d000000200210003900000000030204330000028a033001970000028c033001c70000000000320435000000000001042d000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a106ff0000040f000200000000000200000000030100190000026f01300197000000370210008c000009690000a13d000200000001001d09a109800000040f000100000001001d000000020110003909a1086c0000040f0000000002010433000000000220004c000009780000613d000000200210003900000000030204330000028a033001970000000105000029000000f804500210000000000334019f0000028d0330004100000000003204350000000302500210000000f802200089000000020300002900000000032301cf000000ff0220008c0000000002030019000000000200201900000021031000390000000000230435000009760000013d000200000003001d09a1084c0000040f0000000002010433000000000220004c000009780000613d0000000202000029000000f802200210000000200310003900000000040304330000028a04400197000000000242019f0000028c0220004100000000002304350000000200000005000000000001042d000002810100004100000000001004350000003201000039000000040200003900000000001204350000002402000039000000000100001909a1
06ff0000040f00000080021002700000028e0310009c000000000201a0190000028e0110009c0000000001000019000000100100203900000008031001bf0000026f0420009c000000000103201900000040032002700000026f0420009c000000000203201900000004031001bf0000026a0420009c000000000103201900000020032002700000026a0420009c000000000203201900000002031001bf0000ffff0420008c000000000103201900000010032002700000000002032019000000ff0220008c000000000200001900000001020020390000000001210019000000000001042d0000099f002104230000000102000039000000000001042d0000000002000019000000000001042d000009a100000432000009a20001042e000009a300010430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0000000200000000000000000000000000000000000000000000000000000000ffffffff00000000000000000000000000000000000000000000000000000000ebe4a3d7000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff456e636f64696e6720756e737570706f7274656420747800000000000000000008c379a0000000000000000000000000000000000000000000000000000000009a8a0592ac89c5ad3bc6df8224c17b485976f597df104ee20d0df415241f670b000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff848e1bfa1ac4e3576b728bda6721b215c70a7799a5b4866282a71bab954baac8000000000000000000000000000000000000000000000000fffffffffffffe1fad7c5bef027816a800da1736444fb58a807ef4c9603b7848673f7e3a68eb14a519b453ce45aaaaf3a300f5a9ec95869b4f28ab10430b572ee218c3a6a5e07d6fc2f8787176b8ac6bf7215b4adcc1e069bf4ab82d9ab1df05a57a91d425935b6e000000000000000000000000000000000000000000000000ffffffffffffff5f1901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff7f80800000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000004e487b7100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff9f0200000200000000000000000000000000000004000000000000000000000000556e737570706f727465642074782074797065000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000ffffffffffffffc08100000000000000000000000000000000000000000000000000000000000000496e76616c696420762076616c75650000000000000000000000000000000000940000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb800000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000f80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff", + "0x000000000000000000000000000000000000800d": 
"0x000d000000000002000b0000000c001f000a0000000b001f00090000000a001f000800000009001f000700000008001f000600000007001f000500000006001f000400000005001f000300000004001f000200000003001f000c0000000103550000006001100270000000330010019d000100000002001f000000800300003900000040010000390000000000310435000000000300041600000001022001900000002e0000c13d000000000230004c000000380000c13d000000020200003900000001022001880000003b0000c13d0000000002000411000000350220009c0000003b0000413d000000000101043300000064021000390000003603000041000000000032043500000044021000390000003703000041000000000032043500000024021000390000002403000039000000000032043500000038020000410000000000210435000000040210003900000020030000390000000000320435000000840200003900c5004a0000040f000000000130004c000000380000c13d000000200200003900000100010000390000000000210439000001200200003900000000000204390000004002000039000000340300004100c500420000040f0000000001000019000000000200001900c5004a0000040f000000000100003100c500510000040f000000600100003900000000020104330000008001000039000000000300001900c500420000040f000000400110021000000000013100190000003303000041000000330420009c000000000203801900000060022002100000000001210019000000c60001042e0000003303000041000000330410009c000000000103801900000040011002100000006002200210000000000121019f000000c70001043000050000000000020000000203000031000000050230008c0000009b0000813d000300000001001d000000200210021000000000012300190000000101100039000000000221004b000000930000413d0000000002000411000000000021041f000000010110008c000000610000c13d0000000500000005000000000001042d000000020100008a000200000001001d0000000002000019000100000003001d000000000123004b000000930000413d0000000001230049000000010110008c0000007b0000a13d00000001012001bf000500000002001d00c500ad0000040f0000000502000029000400000001001d0000000201000029000000000112004b000000930000613d0000000202200039000500000002001d000000000102001900c500ad0000040f000000050200002900000001030000290000000404000029000000000014041e000000650000013d00000001013001900000000001000019000000840000613d000000000103001900c500ad0000040f0000000c02000367000000000202043b000000000021041e0000002001000039000000410200008a0000000303000029000000000331004b0000005f0000813d00000020031000390000000c04000367000000000334034f000000000414034f000000000404043b000000000303043b000000000034041e0000004003100039000000000121004b0000000001030019000000850000a13d000000390100004100000000001004350000001101000039000000040200003900000000001204350000002402000039000000000100001900c5004a0000040f0000004001000039000000000101043300000064021000390000003a03000041000000000032043500000044021000390000003b03000041000000000032043500000024021000390000002103000039000000000032043500000038020000410000000000210435000000040210003900000020030000390000000000320435000000840200003900c5004a0000040f0000000a0210008c000000b30000813d0000000501100210000000200110011a0000000201010031000000000001042d0000004001000039000000000101043300000064021000390000003c03000041000000000032043500000044021000390000003d03000041000000000032043500000024021000390000002603000039000000000032043500000038020000410000000000210435000000040210003900000020030000390000000000320435000000840200003900c5004a0000040f000000c500000432000000c60001042e000000c700010430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000666c61670000000000000000000000000000000000000000000000000000000054686973206d6574686f642072657175
6972652073797374656d2063616c6c2008c379a0000000000000000000000000000000000000000000000000000000004e487b710000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000004f6e6c79203420696e6465786564206669656c64732061726520616c6c6f77656973746572730000000000000000000000000000000000000000000000000000546865726520617265206f6e6c792031302061636365737369626c65207265670000000000000000000000000000000000000000000000000000000000000000" + } +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs b/core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs new file mode 100644 index 000000000000..e2db59c79dfe --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/in_memory_setup_data_source.rs @@ -0,0 +1,320 @@ +use circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerFinalizationHint, ZkSyncBaseLayerProof, ZkSyncBaseLayerVerificationKey, +}; +use circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerFinalizationHint, ZkSyncRecursionLayerProof, + ZkSyncRecursionLayerVerificationKey, +}; +use std::collections::HashMap; +use std::io::{Error, ErrorKind}; +use zkevm_test_harness::data_source::{BlockDataSource, SetupDataSource, SourceResult}; + +pub struct InMemoryDataSource { + ///data structures required for holding [`SetupDataSource`] result + base_layer_vk: HashMap, + base_layer_padding_proof: HashMap, + base_layer_finalization_hint: HashMap, + recursion_layer_vk: HashMap, + recursion_layer_node_vk: Option, + recursion_layer_padding_proof: HashMap, + recursion_layer_finalization_hint: HashMap, + recursion_layer_leaf_padding_proof: Option, + recursion_layer_node_padding_proof: Option, + recursion_layer_node_finalization_hint: Option, + + ///data structures required for holding [`BlockDataSource`] result + base_layer_proofs: HashMap<(u8, usize), ZkSyncBaseLayerProof>, + leaf_layer_proofs: HashMap<(u8, usize), ZkSyncRecursionLayerProof>, + node_layer_proofs: HashMap<(u8, usize, usize), ZkSyncRecursionLayerProof>, + scheduler_proof: Option, +} + +impl InMemoryDataSource { + pub fn new() -> Self { + InMemoryDataSource { + base_layer_vk: HashMap::new(), + base_layer_padding_proof: HashMap::new(), + base_layer_finalization_hint: HashMap::new(), + recursion_layer_vk: HashMap::new(), + recursion_layer_node_vk: None, + recursion_layer_padding_proof: HashMap::new(), + recursion_layer_finalization_hint: HashMap::new(), + recursion_layer_leaf_padding_proof: None, + recursion_layer_node_padding_proof: None, + recursion_layer_node_finalization_hint: None, + base_layer_proofs: HashMap::new(), + leaf_layer_proofs: HashMap::new(), + node_layer_proofs: HashMap::new(), + scheduler_proof: None, + } + } +} + +impl SetupDataSource for InMemoryDataSource { + fn get_base_layer_vk(&self, circuit_type: u8) -> SourceResult { + self.base_layer_vk + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_base_layer_padding_proof(&self, circuit_type: u8) -> SourceResult { + self.base_layer_padding_proof + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_base_layer_finalization_hint( + &self, + circuit_type: u8, + ) -> SourceResult { + self.base_layer_finalization_hint + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + 
format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_recursion_layer_vk( + &self, + circuit_type: u8, + ) -> SourceResult { + self.recursion_layer_vk + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_recursion_layer_node_vk(&self) -> SourceResult { + Ok(self.recursion_layer_node_vk.clone().unwrap()) + } + + fn get_recursion_layer_padding_proof( + &self, + circuit_type: u8, + ) -> SourceResult { + self.recursion_layer_padding_proof + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_recursion_layer_finalization_hint( + &self, + circuit_type: u8, + ) -> SourceResult { + self.recursion_layer_finalization_hint + .get(&circuit_type) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no data for circuit type {}", circuit_type), + ))) + } + + fn get_recursion_layer_leaf_padding_proof(&self) -> SourceResult { + Ok(self.recursion_layer_leaf_padding_proof.clone().unwrap()) + } + + fn get_recursion_layer_node_padding_proof(&self) -> SourceResult { + Ok(self.recursion_layer_node_padding_proof.clone().unwrap()) + } + + fn get_recursion_layer_node_finalization_hint( + &self, + ) -> SourceResult { + Ok(self.recursion_layer_node_finalization_hint.clone().unwrap()) + } + + fn set_base_layer_vk(&mut self, vk: ZkSyncBaseLayerVerificationKey) -> SourceResult<()> { + self.base_layer_vk.insert(vk.numeric_circuit_type(), vk); + Ok(()) + } + + fn set_base_layer_padding_proof(&mut self, proof: ZkSyncBaseLayerProof) -> SourceResult<()> { + self.base_layer_padding_proof + .insert(proof.numeric_circuit_type(), proof); + Ok(()) + } + + fn set_base_layer_finalization_hint( + &mut self, + hint: ZkSyncBaseLayerFinalizationHint, + ) -> SourceResult<()> { + self.base_layer_finalization_hint + .insert(hint.numeric_circuit_type(), hint); + Ok(()) + } + + fn set_recursion_layer_vk( + &mut self, + vk: ZkSyncRecursionLayerVerificationKey, + ) -> SourceResult<()> { + self.recursion_layer_vk + .insert(vk.numeric_circuit_type(), vk); + Ok(()) + } + + fn set_recursion_layer_node_vk( + &mut self, + vk: ZkSyncRecursionLayerVerificationKey, + ) -> SourceResult<()> { + self.recursion_layer_node_vk = Some(vk); + Ok(()) + } + + fn set_recursion_layer_padding_proof( + &mut self, + proof: ZkSyncRecursionLayerProof, + ) -> SourceResult<()> { + self.recursion_layer_padding_proof + .insert(proof.numeric_circuit_type(), proof); + Ok(()) + } + + fn set_recursion_layer_finalization_hint( + &mut self, + hint: ZkSyncRecursionLayerFinalizationHint, + ) -> SourceResult<()> { + self.recursion_layer_finalization_hint + .insert(hint.numeric_circuit_type(), hint); + Ok(()) + } + + fn set_recursion_layer_leaf_padding_proof( + &mut self, + proof: ZkSyncRecursionLayerProof, + ) -> SourceResult<()> { + self.recursion_layer_leaf_padding_proof = Some(proof); + Ok(()) + } + + fn set_recursion_layer_node_padding_proof( + &mut self, + proof: ZkSyncRecursionLayerProof, + ) -> SourceResult<()> { + self.recursion_layer_node_padding_proof = Some(proof); + Ok(()) + } + + fn set_recursion_layer_node_finalization_hint( + &mut self, + hint: ZkSyncRecursionLayerFinalizationHint, + ) -> SourceResult<()> { + self.recursion_layer_node_finalization_hint = Some(hint); + Ok(()) + } +} + +impl BlockDataSource for InMemoryDataSource { + fn get_base_layer_proof( + &self, + circuit_type: u8, + index: usize, + ) -> 
SourceResult { + self.base_layer_proofs + .get(&(circuit_type, index)) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!( + "no base layer proof for circuit type {} index {}", + circuit_type, index + ), + ))) + } + + fn get_leaf_layer_proof( + &self, + circuit_type: u8, + index: usize, + ) -> SourceResult { + self.leaf_layer_proofs + .get(&(circuit_type, index)) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!( + "no leaf layer proof for circuit type {} index {}", + circuit_type, index + ), + ))) + } + + fn get_node_layer_proof( + &self, + circuit_type: u8, + step: usize, + index: usize, + ) -> SourceResult { + self.node_layer_proofs + .get(&(circuit_type, step, index)) + .cloned() + .ok_or(Box::new(Error::new( + ErrorKind::Other, + format!( + "no node layer proof for circuit type {} index {} step {}", + circuit_type, index, step + ), + ))) + } + + fn get_scheduler_proof(&self) -> SourceResult { + self.scheduler_proof.clone().ok_or(Box::new(Error::new( + ErrorKind::Other, + format!("no scheduler proof"), + ))) + } + + fn set_base_layer_proof( + &mut self, + index: usize, + proof: ZkSyncBaseLayerProof, + ) -> SourceResult<()> { + let circuit_type = proof.numeric_circuit_type(); + self.base_layer_proofs.insert((circuit_type, index), proof); + Ok(()) + } + + fn set_leaf_layer_proof( + &mut self, + index: usize, + proof: ZkSyncRecursionLayerProof, + ) -> SourceResult<()> { + let circuit_type = proof.numeric_circuit_type(); + self.leaf_layer_proofs.insert((circuit_type, index), proof); + Ok(()) + } + + fn set_node_layer_proof( + &mut self, + circuit_type: u8, + step: usize, + index: usize, + proof: ZkSyncRecursionLayerProof, + ) -> SourceResult<()> { + self.node_layer_proofs + .insert((circuit_type, step, index), proof); + Ok(()) + } + + fn set_scheduler_proof(&mut self, proof: ZkSyncRecursionLayerProof) -> SourceResult<()> { + self.scheduler_proof = Some(proof); + Ok(()) + } +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/lib.rs b/core/bin/vk_setup_data_generator_server_fri/src/lib.rs new file mode 100644 index 000000000000..106d4e21af68 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/lib.rs @@ -0,0 +1,216 @@ +#![feature(generic_const_exprs)] +use std::fs::File; +use std::io::Read; + +use circuit_definitions::boojum::cs::implementations::hints::{ + DenseVariablesCopyHint, DenseWitnessCopyHint, +}; +use circuit_definitions::boojum::cs::implementations::polynomial_storage::{ + SetupBaseStorage, SetupStorage, +}; +use circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver; +use circuit_definitions::boojum::cs::implementations::verifier::VerificationKey; +use circuit_definitions::boojum::cs::oracle::merkle_tree::MerkleTreeWithCap; +use circuit_definitions::boojum::cs::oracle::TreeHasher; +use circuit_definitions::boojum::field::{PrimeField, SmallField}; + +use circuit_definitions::boojum::field::traits::field_like::PrimeFieldLikeVectorized; + +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; +use circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, +}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use zksync_config::configs::FriProverConfig; +use zksync_types::proofs::AggregationRound; + +pub mod in_memory_setup_data_source; +pub mod utils; + +#[derive(Debug, Serialize, Deserialize)] +#[serde( + bound = "F: serde::Serialize + 
serde::de::DeserializeOwned, P: serde::Serialize + serde::de::DeserializeOwned" +)] +pub struct ProverSetupData< + F: PrimeField + SmallField, + P: PrimeFieldLikeVectorized<Base = F>, + H: TreeHasher<F>, +> { + pub setup_base: SetupBaseStorage<F, P>, + pub setup: SetupStorage<F, P>, + #[serde(bound( + serialize = "H::Output: serde::Serialize", + deserialize = "H::Output: serde::de::DeserializeOwned" + ))] + pub vk: VerificationKey<F, H>, + #[serde(bound( + serialize = "H::Output: serde::Serialize", + deserialize = "H::Output: serde::de::DeserializeOwned" + ))] + pub setup_tree: MerkleTreeWithCap<F, H>, + pub vars_hint: DenseVariablesCopyHint, + pub wits_hint: DenseWitnessCopyHint, + pub finalization_hint: FinalizationHintsForProver, +} + +enum ProverServiceDataType { + VerificationKey, + SetupData, +} + +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct ProverServiceDataKey { + pub circuit_id: u8, + pub round: AggregationRound, +} + +impl ProverServiceDataKey { + pub fn new(circuit_id: u8, round: AggregationRound) -> Self { + Self { circuit_id, round } + } +} + +pub fn get_base_path() -> String { + let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); + format!( + "{}/core/bin/vk_setup_data_generator_server_fri/data", + zksync_home + ) +} + +pub fn get_base_vk_path() -> String { + let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| "/".into()); + format!( + "{}/core/bin/vk_setup_data_generator_server_fri/data", + zksync_home + ) +} + +fn get_file_path(key: ProverServiceDataKey, service_data_type: ProverServiceDataType) -> String { + let name = match key.round { + AggregationRound::BasicCircuits => { + format!("basic_{}", key.circuit_id) + } + AggregationRound::LeafAggregation => { + format!("leaf_{}", key.circuit_id) + } + AggregationRound::NodeAggregation => "node".to_string(), + AggregationRound::Scheduler => "scheduler".to_string(), + }; + match service_data_type { + ProverServiceDataType::VerificationKey => { + format!("{}/verification_{}_key.json", get_base_vk_path(), name) + } + ProverServiceDataType::SetupData => { + format!( + "{}/setup_{}_data.bin", + FriProverConfig::from_env().setup_data_path, + name + ) + } + } +} + +pub fn get_base_layer_vk_for_circuit_type(circuit_type: u8) -> ZkSyncBaseLayerVerificationKey { + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("Fetching verification key from path: {}", filepath); + let text = std::fs::read_to_string(&filepath) + .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); + serde_json::from_str::<ZkSyncBaseLayerVerificationKey>(&text).unwrap_or_else(|_| { + panic!( + "Failed deserializing verification key from path: {}", + filepath + ) + }) +} + +pub fn get_recursive_layer_vk_for_circuit_type( + circuit_type: u8, +) -> ZkSyncRecursionLayerVerificationKey { + let round = get_round_for_recursive_circuit_type(circuit_type); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, round), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("Fetching verification key from path: {}", filepath); + let text = std::fs::read_to_string(&filepath) + .unwrap_or_else(|_| panic!("Failed reading verification key from path: {}", filepath)); + serde_json::from_str::<ZkSyncRecursionLayerVerificationKey>(&text).unwrap_or_else(|_| { + panic!( + "Failed deserializing verification key from path: {}", + filepath + ) + }) +} + +pub fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound { + match circuit_type { + 
circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => { + AggregationRound::Scheduler + } + circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8 => { + AggregationRound::NodeAggregation + } + _ => AggregationRound::LeafAggregation, + } +} + +pub fn save_base_layer_vk(vk: ZkSyncBaseLayerVerificationKey) { + let circuit_type = vk.numeric_circuit_type(); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("saving basic verification key to: {}", filepath); + std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); +} + +pub fn save_recursive_layer_vk(vk: ZkSyncRecursionLayerVerificationKey) { + let circuit_type = vk.numeric_circuit_type(); + let round = get_round_for_recursive_circuit_type(circuit_type); + let filepath = get_file_path( + ProverServiceDataKey::new(circuit_type, round), + ProverServiceDataType::VerificationKey, + ); + vlog::info!("saving recursive layer verification key to: {}", filepath); + std::fs::write(filepath, serde_json::to_string_pretty(&vk).unwrap()).unwrap(); +} + +pub fn get_setup_data_for_circuit_type<F, P, H>( + key: ProverServiceDataKey, +) -> ProverSetupData<F, P, H> +where + F: PrimeField + SmallField + Serialize + DeserializeOwned, + P: PrimeFieldLikeVectorized<Base = F> + Serialize + DeserializeOwned, + H: TreeHasher<F>, + <H as TreeHasher<F>>::Output: Serialize + DeserializeOwned, +{ + let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); + let mut file = File::open(filepath.clone()) + .unwrap_or_else(|_| panic!("Failed reading setup-data from path: {:?}", filepath)); + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).unwrap_or_else(|_| { + panic!( + "Failed reading setup-data to buffer from path: {:?}", + filepath + ) + }); + vlog::info!("loading {:?} setup data from path: {}", key, filepath); + bincode::deserialize::<ProverSetupData<F, P, H>>(&buffer).unwrap_or_else(|_| { + panic!( + "Failed deserializing setup-data at path: {:?} for circuit: {:?}", + filepath, key + ) + }) +} + +pub fn save_setup_data(key: ProverServiceDataKey, serialized_setup_data: &Vec<u8>) { + let filepath = get_file_path(key.clone(), ProverServiceDataType::SetupData); + vlog::info!("saving {:?} setup data to: {}", key, filepath); + std::fs::write(filepath.clone(), serialized_setup_data) + .unwrap_or_else(|_| panic!("Failed saving setup-data at path: {:?}", filepath)); +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/main.rs b/core/bin/vk_setup_data_generator_server_fri/src/main.rs new file mode 100644 index 000000000000..589af0504cb2 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/main.rs @@ -0,0 +1,54 @@ +#![feature(generic_const_exprs)] + +use crate::in_memory_setup_data_source::InMemoryDataSource; +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use zkevm_test_harness::compute_setups::{ + generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, +}; +use zkevm_test_harness::data_source::SetupDataSource; +use zksync_vk_setup_data_server_fri::{save_base_layer_vk, save_recursive_layer_vk}; + +mod in_memory_setup_data_source; +mod vk_generator; + +fn save_vks(source: &dyn SetupDataSource) { + for base_circuit_type in + (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) + { + let vk = source
.get_base_layer_vk(base_circuit_type) + .unwrap_or_else(|_| panic!("No vk exists for circuit type: {}", base_circuit_type)); + save_base_layer_vk(vk); + } + for leaf_circuit_type in (ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8) + ..=(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForL1MessagesHasher as u8) + { + let vk = source + .get_recursion_layer_vk(leaf_circuit_type) + .unwrap_or_else(|_| panic!("No vk exists for circuit type: {}", leaf_circuit_type)); + save_recursive_layer_vk(vk); + } + save_recursive_layer_vk( + source + .get_recursion_layer_node_vk() + .expect("No vk exists for node layer circuit"), + ); + save_recursive_layer_vk( + source + .get_recursion_layer_vk(ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8) + .expect("No vk exists for scheduler circuit"), + ); +} + +fn generate_vks() { + let mut in_memory_source = InMemoryDataSource::new(); + generate_base_layer_vks_and_proofs(&mut in_memory_source).expect("Failed generating base vk's"); + generate_recursive_layer_vks_and_proofs(&mut in_memory_source) + .expect("Failed generating recursive vk's"); + save_vks(&in_memory_source); +} + +fn main() { + generate_vks(); +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs new file mode 100644 index 000000000000..d2e0858ea323 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs @@ -0,0 +1,137 @@ +use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use circuit_definitions::boojum::worker::Worker; +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; +use circuit_definitions::{ + ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, +}; +use structopt::StructOpt; +use zkevm_test_harness::geometry_config::get_geometry_config; +use zkevm_test_harness::prover_utils::{ + create_base_layer_setup_data, create_recursive_layer_setup_data, +}; +use zksync_types::proofs::AggregationRound; +use zksync_vk_setup_data_server_fri::utils::{ + get_basic_circuits, get_leaf_circuits, get_node_circuit, get_scheduler_circuit, CYCLE_LIMIT, +}; +use zksync_vk_setup_data_server_fri::{ + get_round_for_recursive_circuit_type, save_setup_data, ProverServiceDataKey, ProverSetupData, +}; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "Generate setup data for individual circuit", + about = "Tool for generating setup data for individual circuit" +)] +struct Opt { + /// Numeric circuit type; valid values are: + /// 1. for the base layer: [1-13]. + /// 2. for the recursive layer: [1-15]. + #[structopt(long)] + numeric_circuit: u8, + /// Boolean representing whether to generate for the base layer or for the recursive layer. 
+ #[structopt(short = "b", long = "is_base_layer")] + is_base_layer: bool, +} + +fn main() { + let opt = Opt::from_args(); + match opt.is_base_layer { + true => { + let circuit = get_base_layer_circuit(opt.numeric_circuit); + generate_base_layer_setup_data(circuit); + } + false => { + let circuit = get_recursive_circuit(opt.numeric_circuit); + generate_recursive_layer_setup_data(circuit); + } + } +} + +fn get_base_layer_circuit( + id: u8, +) -> ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle, + ZkSyncDefaultRoundFunction, +> { + get_basic_circuits(CYCLE_LIMIT, get_geometry_config()) + .into_iter() + .find(|circuit| id == circuit.numeric_circuit_type()) + .unwrap_or_else(|| panic!("No basic circuit found for id: {}", id)) +} + +fn get_recursive_circuit(id: u8) -> ZkSyncRecursiveLayerCircuit { + let mut recursive_circuits = get_leaf_circuits(); + recursive_circuits.push(get_node_circuit()); + recursive_circuits.push(get_scheduler_circuit()); + recursive_circuits + .into_iter() + .find(|circuit| id == circuit.numeric_circuit_type()) + .unwrap_or_else(|| panic!("No recursive circuit found for id: {}", id)) +} + +fn generate_recursive_layer_setup_data(circuit: ZkSyncRecursiveLayerCircuit) { + let circuit_type = circuit.numeric_circuit_type(); + vlog::info!( + "starting setup data generator for recursive layer circuit: {}.", + circuit_type + ); + let worker = Worker::new(); + let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = + create_recursive_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + let prover_setup_data = ProverSetupData { + setup_base, + setup, + vk: vk.clone(), + setup_tree, + vars_hint, + wits_hint, + finalization_hint, + }; + let serialized = bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); + let round = get_round_for_recursive_circuit_type(circuit_type); + save_setup_data(ProverServiceDataKey::new(circuit_type, round), &serialized); +} + +fn generate_base_layer_setup_data( + circuit: ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle, + ZkSyncDefaultRoundFunction, + >, +) { + let circuit_type = circuit.numeric_circuit_type(); + vlog::info!( + "starting setup data generator for base layer circuit: {}.", + circuit_type + ); + let worker = Worker::new(); + let (setup_base, setup, vk, setup_tree, vars_hint, wits_hint, finalization_hint) = + create_base_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + let prover_setup_data = ProverSetupData { + setup_base, + setup, + vk: vk.clone(), + setup_tree, + vars_hint, + wits_hint, + finalization_hint, + }; + let serialized = bincode::serialize(&prover_setup_data).expect("Failed serializing setup data"); + save_setup_data( + ProverServiceDataKey::new(circuit_type, AggregationRound::BasicCircuits), + &serialized, + ); +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/utils.rs b/core/bin/vk_setup_data_generator_server_fri/src/utils.rs new file mode 100644 index 000000000000..014aac6b2a40 --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/utils.rs @@ -0,0 +1,322 @@ +use crate::{ + get_base_layer_vk_for_circuit_type, get_base_path, get_recursive_layer_vk_for_circuit_type, +}; +use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use 
circuit_definitions::boojum::gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness; +use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use circuit_definitions::boojum::gadgets::traits::allocatable::CSAllocatable; +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::leaf_layer::ZkSyncLeafLayerRecursiveCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::node_layer::ZkSyncNodeLayerRecursiveCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionProof, ZkSyncRecursiveLayerCircuit, RECURSION_ARITY, SCHEDULER_CAPACITY, +}; +use circuit_definitions::zk_evm::bytecode_to_code_hash; +use circuit_definitions::zk_evm::testing::storage::InMemoryStorage; +use circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; +use circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::{ + RecursionLeafInput, RecursionLeafInstanceWitness, +}; +use circuit_definitions::zkevm_circuits::recursion::leaf_layer::LeafLayerRecursionConfig; +use circuit_definitions::zkevm_circuits::recursion::node_layer::input::{ + RecursionNodeInput, RecursionNodeInstanceWitness, +}; +use circuit_definitions::zkevm_circuits::recursion::node_layer::NodeLayerRecursionConfig; +use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; +use circuit_definitions::{ + base_layer_proof_config, recursion_layer_proof_config, zk_evm, ZkSyncDefaultRoundFunction, +}; +use itertools::Itertools; +use std::collections::{HashMap, VecDeque}; +use std::fs; +use zkevm_test_harness::compute_setups::{ + generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, +}; +use zkevm_test_harness::data_source::BlockDataSource; +use zkevm_test_harness::ethereum_types::{Address, U256}; +use zkevm_test_harness::external_calls::run; +use zkevm_test_harness::helper::artifact_utils::{save_predeployed_contracts, TestArtifact}; +use zkevm_test_harness::sha3::{Digest, Keccak256}; +use zkevm_test_harness::toolset::GeometryConfig; +use zkevm_test_harness::witness::full_block_artifact::{ + BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, + BlockBasicCircuitsPublicInputs, +}; +use zkevm_test_harness::witness::recursive_aggregation::compute_leaf_params; +use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZKSyncTestingTree}; + +use crate::in_memory_setup_data_source::InMemoryDataSource; + +pub const CYCLE_LIMIT: usize = 20000; + +fn read_witness_artifact(filepath: &str) -> TestArtifact { + let text = fs::read_to_string(filepath) + .unwrap_or_else(|_| panic!("Failed to read witness artifact from path: {}", filepath)); + serde_json::from_str(text.as_str()).unwrap() +} + +pub fn get_basic_circuits( + cycle_limit: usize, + geometry: GeometryConfig, +) -> Vec< + ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle, + ZkSyncDefaultRoundFunction, + >, +> { + let path = format!("{}/witness_artifacts.json", get_base_path()); + let test_artifact = read_witness_artifact(&path); + let (base_layer_circuit, _, 
_, _) = get_circuits(test_artifact, cycle_limit, geometry); + base_layer_circuit + .into_flattened_set() + .into_iter() + .dedup_by(|a, b| a.numeric_circuit_type() == b.numeric_circuit_type()) + .collect() +} + +pub fn get_leaf_circuits() -> Vec { + let mut circuits = vec![]; + for base_circuit_type in + (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) + { + let input = RecursionLeafInput::placeholder_witness(); + let vk = get_base_layer_vk_for_circuit_type(base_circuit_type); + + let witness = RecursionLeafInstanceWitness { + input, + vk_witness: vk.clone().into_inner(), + queue_witness: FullStateCircuitQueueRawWitness { + elements: VecDeque::new(), + }, + proof_witnesses: VecDeque::new(), + }; + + let config = LeafLayerRecursionConfig { + proof_config: base_layer_proof_config(), + vk_fixed_parameters: vk.into_inner().fixed_parameters, + capacity: RECURSION_ARITY, + _marker: std::marker::PhantomData, + }; + + let circuit = ZkSyncLeafLayerRecursiveCircuit { + base_layer_circuit_type: BaseLayerCircuitType::from_numeric_value(base_circuit_type), + witness: witness, + config: config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + + let circuit = ZkSyncRecursiveLayerCircuit::leaf_circuit_from_base_type( + BaseLayerCircuitType::from_numeric_value(base_circuit_type), + circuit, + ); + circuits.push(circuit) + } + circuits +} + +pub fn get_node_circuit() -> ZkSyncRecursiveLayerCircuit { + let input = RecursionNodeInput::placeholder_witness(); + + let input_vk = get_recursive_layer_vk_for_circuit_type( + ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8, + ); + let witness = RecursionNodeInstanceWitness { + input, + vk_witness: input_vk.clone().into_inner(), + split_points: VecDeque::new(), + proof_witnesses: VecDeque::new(), + }; + + let config = NodeLayerRecursionConfig { + proof_config: recursion_layer_proof_config(), + vk_fixed_parameters: input_vk.clone().into_inner().fixed_parameters, + leaf_layer_capacity: RECURSION_ARITY, + node_layer_capacity: RECURSION_ARITY, + _marker: std::marker::PhantomData, + }; + let circuit = ZkSyncNodeLayerRecursiveCircuit { + witness: witness, + config: config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + ZkSyncRecursiveLayerCircuit::NodeLayerCircuit(circuit) +} + +pub fn get_scheduler_circuit() -> ZkSyncRecursiveLayerCircuit { + let mut scheduler_witness = SchedulerCircuitInstanceWitness::placeholder(); + + // node VK + let node_vk = get_recursive_layer_vk_for_circuit_type( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .into_inner(); + scheduler_witness.node_layer_vk_witness = node_vk.clone(); + + let config = SchedulerConfig { + proof_config: recursion_layer_proof_config(), + vk_fixed_parameters: node_vk.fixed_parameters, + capacity: SCHEDULER_CAPACITY, + _marker: std::marker::PhantomData, + }; + let scheduler_circuit = SchedulerCircuit { + witness: scheduler_witness, + config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit) +} + +#[allow(dead_code)] +fn get_recursive_layer_proofs() -> Vec { + let mut in_memory_source = InMemoryDataSource::new(); + generate_base_layer_vks_and_proofs(&mut in_memory_source).expect("Failed generating base vk's"); + generate_recursive_layer_vks_and_proofs(&mut in_memory_source) + .expect("Failed generating recursive vk's"); + let mut scheduler_proofs: Vec = vec![]; + for recursive_circuit_type in 
(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForMainVM as u8) + ..=(ZkSyncRecursionLayerStorageType::LeafLayerCircuitForL1MessagesHasher as u8) + { + let proof = in_memory_source + .get_node_layer_proof(recursive_circuit_type, 0, 0) + .unwrap(); + scheduler_proofs.push(proof.into_inner()); + } + scheduler_proofs +} + +pub fn get_leaf_vk_params() -> Vec<(u8, RecursionLeafParametersWitness<GoldilocksField>)> { + let mut leaf_vk_commits = vec![]; + + for circuit_type in + (BaseLayerCircuitType::VM as u8)..=(BaseLayerCircuitType::L1MessagesHasher as u8) + { + let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(circuit_type), + ); + let base_vk = get_base_layer_vk_for_circuit_type(circuit_type); + let leaf_vk = get_recursive_layer_vk_for_circuit_type(recursive_circuit_type as u8); + let params = compute_leaf_params(circuit_type, base_vk, leaf_vk); + leaf_vk_commits.push((circuit_type, params)); + } + leaf_vk_commits +} + +fn get_circuits( + mut test_artifact: TestArtifact, + cycle_limit: usize, + geometry: GeometryConfig, +) -> ( + BlockBasicCircuits<GoldilocksField, ZkSyncDefaultRoundFunction>, + BlockBasicCircuitsPublicInputs<GoldilocksField>, + BlockBasicCircuitsPublicCompactFormsWitnesses<GoldilocksField>, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, +) { + let round_function = ZkSyncDefaultRoundFunction::default(); + + let mut storage_impl = InMemoryStorage::new(); + let mut tree = ZKSyncTestingTree::empty(); + + test_artifact.entry_point_address = + *zk_evm::zkevm_opcode_defs::system_params::BOOTLOADER_FORMAL_ADDRESS; + + let predeployed_contracts = test_artifact + .predeployed_contracts + .clone() + .into_iter() + .chain(Some(( + test_artifact.entry_point_address, + test_artifact.entry_point_code.clone(), + ))) + .collect::<HashMap<_, _>>(); + save_predeployed_contracts(&mut storage_impl, &mut tree, &predeployed_contracts); + + let used_bytecodes = HashMap::from_iter( + test_artifact + .predeployed_contracts + .iter() + .map(|(_, bytecode)| { + ( + bytecode_to_code_hash(&bytecode).unwrap().into(), + bytecode.clone(), + ) + }) + .chain( + Some(test_artifact.default_account_code.clone()).map(|bytecode| { + ( + bytecode_to_code_hash(&bytecode).unwrap().into(), + bytecode.clone(), + ) + }), + ), + ); + + let previous_enumeration_index = tree.next_enumeration_index(); + let previous_root = tree.root(); + // simulate content hash + + let mut hasher = Keccak256::new(); + hasher.update(&previous_enumeration_index.to_be_bytes()); + hasher.update(&previous_root); + hasher.update(&0u64.to_be_bytes()); // porter shard + hasher.update(&[0u8; 32]); // porter shard + + let mut previous_data_hash = [0u8; 32]; + (&mut previous_data_hash[..]).copy_from_slice(&hasher.finalize().as_slice()); + + let previous_aux_hash = [0u8; 32]; + let previous_meta_hash = [0u8; 32]; + + let mut hasher = Keccak256::new(); + hasher.update(&previous_data_hash); + hasher.update(&previous_meta_hash); + hasher.update(&previous_aux_hash); + + let mut previous_content_hash = [0u8; 32]; + (&mut previous_content_hash[..]).copy_from_slice(&hasher.finalize().as_slice()); + + let default_account_codehash = + bytecode_to_code_hash(&test_artifact.default_account_code).unwrap(); + let default_account_codehash = U256::from_big_endian(&default_account_codehash); + + let ( + basic_block_circuits, + basic_block_circuits_inputs, + closed_form_inputs, + scheduler_partial_input, + _, + ) = run( + Address::zero(), + test_artifact.entry_point_address, + test_artifact.entry_point_code, + vec![], + false, + 
default_account_codehash, + used_bytecodes, + vec![], + cycle_limit, + round_function.clone(), + geometry, + storage_impl, + &mut tree, + ); + + ( + basic_block_circuits, + basic_block_circuits_inputs, + closed_form_inputs, + scheduler_partial_input, + ) +} diff --git a/core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs b/core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs new file mode 100644 index 000000000000..bd112dc4211a --- /dev/null +++ b/core/bin/vk_setup_data_generator_server_fri/src/vk_generator.rs @@ -0,0 +1,48 @@ +use circuit_definitions::boojum::worker::Worker; +use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; +use circuit_definitions::{BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR}; +use zkevm_test_harness::geometry_config::get_geometry_config; +use zkevm_test_harness::prover_utils::{ + create_base_layer_setup_data, create_recursive_layer_setup_data, +}; +use zksync_vk_setup_data_server_fri::utils::{get_basic_circuits, get_leaf_circuits, CYCLE_LIMIT}; +use zksync_vk_setup_data_server_fri::{save_base_layer_vk, save_recursive_layer_vk}; + +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerVerificationKey; + +fn main() { + vlog::info!("starting vk generator"); + generate_basic_circuit_vks(); +} + +pub fn generate_basic_circuit_vks() { + let worker = Worker::new(); + for circuit in get_basic_circuits(CYCLE_LIMIT, get_geometry_config()) { + let circuit_type = circuit.numeric_circuit_type(); + let (_, _, vk, _, _, _, _) = create_base_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + let typed_vk = ZkSyncBaseLayerVerificationKey::from_inner(circuit_type, vk); + save_base_layer_vk(typed_vk); + } +} + +pub fn generate_leaf_layer_vks() { + let worker = Worker::new(); + for circuit in get_leaf_circuits() { + let circuit_type = circuit.numeric_circuit_type(); + let (_setup_base, _setup, vk, _setup_tree, _vars_hint, _wits_hint, _finalization_hint) = + create_recursive_layer_setup_data( + circuit.clone(), + &worker, + BASE_LAYER_FRI_LDE_FACTOR, + BASE_LAYER_CAP_SIZE, + ); + + let typed_vk = ZkSyncRecursionLayerVerificationKey::from_inner(circuit_type, vk.clone()); + save_recursive_layer_vk(typed_vk); + } +} diff --git a/core/bin/witness_generator/Cargo.toml b/core/bin/witness_generator/Cargo.toml index be022dfdf223..46b0fc88d614 100644 --- a/core/bin/witness_generator/Cargo.toml +++ b/core/bin/witness_generator/Cargo.toml @@ -10,6 +10,11 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] publish = false # We don't want to publish our binaries. 
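The `[lib]` hunk below adds a library target named `zksync_witness_utils`, rooted at `src/utils.rs`, so other prover crates can link the shared witness helpers instead of duplicating them. A sketch of how a dependent crate might consume it; the helper name comes from this patch's `utils.rs`, but the Cargo wiring and exact signature are assumptions:

```rust
// Hypothetical consumer of the new `zksync_witness_utils` lib target.
use zksync_witness_utils::expand_bootloader_contents;

fn main() {
    // Expand an empty (index, word) heap snapshot into full bootloader memory.
    let memory = expand_bootloader_contents(&[]);
    println!("bootloader memory bytes: {}", memory.len());
}
```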
+[lib] +name = "zksync_witness_utils" +path = "src/utils.rs" + + [dependencies] zksync_dal = { path = "../../lib/dal", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } @@ -21,18 +26,21 @@ zksync_object_store = { path = "../../lib/object_store", version = "1.0" } zksync_types = { path = "../../lib/types", version = "1.0" } zksync_state = { path = "../../lib/state", version = "1.0" } zksync_utils = { path = "../../lib/utils", version = "1.0" } -zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } +vk_setup_data_generator_server_fri = { path = "../vk_setup_data_generator_server_fri", version = "1.0" } zksync_prover_utils = { path = "../../lib/prover_utils", version = "1.0" } -zksync_db_storage_provider = { path = "../../lib/db_storage_provider", version = "1.0" } + +zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0" } +circuit_definitions = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.4.0", features = ["log_tracing"]} +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0" } tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } metrics = "0.20" serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" async-trait = "0.1" +bincode = "1" rand = "0.8" +hex = "0.4" structopt = "0.3.26" - -[dev-dependencies] -bincode = "1" -const-decoder = "0.3.0" +ctrlc = { version = "3.1", features = ["termination"] } diff --git a/core/bin/witness_generator/README.md b/core/bin/witness_generator/README.md index 00a805e0b7e5..0600487e3d0f 100644 --- a/core/bin/witness_generator/README.md +++ b/core/bin/witness_generator/README.md @@ -15,7 +15,7 @@ aggregation. 
That is, every aggregation round needs two sets of input: ## BasicCircuitsWitnessGenerator - generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits): -- input table: `basic_circuit_witness_jobs` +- input table: `basic_circuit_witness_jobs` - artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and `scheduler_aggregation_jobs`) value in `aggregation_round` field of `prover_jobs` table: 0 diff --git a/core/bin/witness_generator/rust-toolchain.toml b/core/bin/witness_generator/rust-toolchain.toml new file mode 100644 index 000000000000..5d56faf9ae08 --- /dev/null +++ b/core/bin/witness_generator/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly" diff --git a/core/bin/witness_generator/src/basic_circuits.rs b/core/bin/witness_generator/src/basic_circuits.rs index 079497465008..8ad7d30c9a3d 100644 --- a/core/bin/witness_generator/src/basic_circuits.rs +++ b/core/bin/witness_generator/src/basic_circuits.rs @@ -1,57 +1,67 @@ -use std::cell::RefCell; -use std::collections::hash_map::DefaultHasher; -use std::collections::{HashMap, HashSet}; -use std::hash::{Hash, Hasher}; -use std::rc::Rc; +use std::hash::Hash; use std::sync::Arc; -use std::time::Instant; +use std::{ + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + time::Instant, +}; use async_trait::async_trait; +use circuit_definitions::ZkSyncDefaultRoundFunction; use rand::Rng; use serde::{Deserialize, Serialize}; +use zkevm_test_harness::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use zkevm_test_harness::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use zkevm_test_harness::geometry_config::get_geometry_config; +use zkevm_test_harness::toolset::GeometryConfig; +use zkevm_test_harness::witness::full_block_artifact::{ + BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, + BlockBasicCircuitsPublicInputs, +}; +use zkevm_test_harness::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; +use zkevm_test_harness::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use vm::zk_evm::ethereum_types::H256; -use vm::{memory::SimpleMemory, HistoryDisabled, StorageOracle, MAX_CYCLES_FOR_TX}; -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_config::constants::BOOTLOADER_ADDRESS; +use vm::{HistoryDisabled, StorageOracle, MAX_CYCLES_FOR_TX}; +use zksync_config::configs::FriWitnessGeneratorConfig; +use zksync_dal::fri_witness_generator_dal::FriWitnessJobStatus; use zksync_dal::ConnectionPool; -use zksync_db_storage_provider::DbStorageProvider; -use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; +use zksync_object_store::{ + Bucket, ClosedFormInputKey, ObjectStore, ObjectStoreFactory, StoredObject, +}; use zksync_queued_job_processor::JobProcessor; -use zksync_state::storage_view::StorageView; -use zksync_types::zkevm_test_harness::toolset::GeometryConfig; +use zksync_state::{PostgresStorage, StorageView}; +use zksync_types::proofs::AggregationRound; use zksync_types::{ - circuit::GEOMETRY_CONFIG, - proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, - zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::bn256::Bn256, - witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, - witness::oracle::VmWitnessOracle, - SchedulerCircuitInstanceWitness, - }, - Address, L1BatchNumber, U256, + 
proofs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, + Address, L1BatchNumber, BOOTLOADER_ADDRESS, H256, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; -use crate::precalculated::PrecalculatedMerklePathsProvider; +use crate::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; use crate::utils::{ - expand_bootloader_contents, save_prover_input_artifacts, track_witness_generation_stage, + expand_bootloader_contents, get_recursive_layer_circuit_id_for_base_layer, + save_base_prover_input_artifacts, AuxOutputWitnessWrapper, ClosedFormInputWrapper, + SchedulerPartialInputWrapper, }; pub struct BasicCircuitArtifacts { - basic_circuits: BlockBasicCircuits, - basic_circuits_inputs: BlockBasicCircuitsPublicInputs, - scheduler_witness: SchedulerCircuitInstanceWitness, - circuits: Vec>>, + basic_circuits: BlockBasicCircuits, + basic_circuits_inputs: BlockBasicCircuitsPublicInputs, + per_circuit_closed_form_inputs: BlockBasicCircuitsPublicCompactFormsWitnesses, + #[allow(dead_code)] + scheduler_witness: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + aux_output_witness: BlockAuxilaryOutputWitness, } #[derive(Debug)] struct BlobUrls { - basic_circuits_url: String, - basic_circuits_inputs_url: String, + circuit_ids_and_urls: Vec<(u8, String)>, + closed_form_inputs_and_urls: Vec<(u8, String, usize)>, scheduler_witness_url: String, - circuit_types_and_urls: Vec<(&'static str, String)>, } #[derive(Clone)] @@ -62,99 +72,83 @@ pub struct BasicWitnessGeneratorJob { #[derive(Debug)] pub struct BasicWitnessGenerator { - config: WitnessGeneratorConfig, + config: Arc, object_store: Arc, + public_blob_store: Box, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, } impl BasicWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + config: FriWitnessGeneratorConfig, + store_factory: &ObjectStoreFactory, + public_blob_store: Box, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { - config, - object_store: store_factory.create_store().into(), + config: Arc::new(config), + object_store: store_factory.create_store().await.into(), + public_blob_store, + connection_pool, + prover_connection_pool, } } - fn process_job_sync( - object_store: &dyn ObjectStore, + async fn process_job_impl( + object_store: Arc, connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, basic_job: BasicWitnessGeneratorJob, started_at: Instant, + config: Arc, ) -> Option { - let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); let BasicWitnessGeneratorJob { block_number, job } = basic_job; + let shall_force_process_block = config + .force_process_block + .map_or(false, |block| block == block_number.0); if let Some(blocks_proving_percentage) = config.blocks_proving_percentage { // Generate random number in (0; 100). let threshold = rand::thread_rng().gen_range(1..100); // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. // In this case job should be skipped. 
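The branch below implements the sampling rule spelled out in the comment above. Restated as a standalone predicate (a sketch with hypothetical names and types; the shipped code inlines this logic):

```rust
// Process the block when the random threshold (drawn from 1..100) does not
// exceed the configured proving percentage, or when the block is explicitly
// force-processed via the config's force_process_block setting.
fn should_process_block(threshold: u8, blocks_proving_percentage: u8, force: bool) -> bool {
    threshold <= blocks_proving_percentage || force
}
```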
- if threshold > blocks_proving_percentage { - metrics::counter!("server.witness_generator.skipped_blocks", 1); + if threshold > blocks_proving_percentage && !shall_force_process_block { + metrics::counter!("server.witness_generator_fri.skipped_blocks", 1); vlog::info!( "Skipping witness generation for block {}, blocks_proving_percentage: {}", block_number.0, blocks_proving_percentage ); - let mut storage = connection_pool.access_storage_blocking(); - storage - .witness_generator_dal() - .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits); + + let mut prover_storage = prover_connection_pool.access_storage().await; + prover_storage + .fri_witness_generator_dal() + .mark_witness_job(FriWitnessJobStatus::Skipped, block_number) + .await; return None; } } - metrics::counter!("server.witness_generator.sampled_blocks", 1); + metrics::counter!("server.witness_generator_fri.sampled_blocks", 1); vlog::info!( "Starting witness generation of type {:?} for block {}", AggregationRound::BasicCircuits, block_number.0 ); - Some(process_basic_circuits_job( - object_store, - config, - connection_pool, - started_at, - block_number, - job, - )) - } - - fn get_artifacts(&self, block_number: L1BatchNumber) -> BasicWitnessGeneratorJob { - let job = self.object_store.get(block_number).unwrap(); - BasicWitnessGeneratorJob { block_number, job } - } - - fn save_artifacts( - &self, - block_number: L1BatchNumber, - artifacts: BasicCircuitArtifacts, - ) -> BlobUrls { - let basic_circuits_url = self - .object_store - .put(block_number, &artifacts.basic_circuits) - .unwrap(); - let basic_circuits_inputs_url = self - .object_store - .put(block_number, &artifacts.basic_circuits_inputs) - .unwrap(); - let scheduler_witness_url = self - .object_store - .put(block_number, &artifacts.scheduler_witness) - .unwrap(); - - let circuit_types_and_urls = save_prover_input_artifacts( - block_number, - &artifacts.circuits, - &*self.object_store, - AggregationRound::BasicCircuits, - ); - BlobUrls { - basic_circuits_url, - basic_circuits_inputs_url, - scheduler_witness_url, - circuit_types_and_urls, - } + Some( + process_basic_circuits_job( + &*object_store, + config, + connection_pool, + started_at, + block_number, + job, + ) + .await, + ) } } @@ -165,65 +159,64 @@ impl JobProcessor for BasicWitnessGenerator { // The artifact is optional to support skipping blocks when sampling is enabled. 
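Here `JobArtifacts` is an `Option` around `BasicCircuitArtifacts`: `None` marks a block that sampling skipped, and the job still counts as successful. A sketch of the downstream contract (hypothetical helper; it mirrors the `match` in `save_result` further below):

```rust
// Sketch: how the optional artifact is meant to be consumed.
async fn handle_artifacts(artifacts: Option<BasicCircuitArtifacts>) {
    match artifacts {
        // Sampled out: nothing to persist, the job is simply marked done.
        None => (),
        // Processed: persist blobs, then record URLs and success in the prover DB.
        Some(_artifacts) => { /* save_artifacts + update_database */ }
    }
}
```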
type JobArtifacts = Option; - const SERVICE_NAME: &'static str = "basic_circuit_witness_generator"; + const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - match connection - .witness_generator_dal() - .get_next_basic_circuit_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ) { - Some(metadata) => { - let job = self.get_artifacts(metadata.block_number); - Some((job.block_number, job)) + match prover_connection + .fri_witness_generator_dal() + .get_next_basic_circuit_witness_job(last_l1_batch_to_process) + .await + { + Some(block_number) => { + vlog::info!( + "Processing FRI basic witness-gen for block {}", + block_number + ); + let started_at = Instant::now(); + let job = get_artifacts(block_number, &*self.object_store).await; + metrics::histogram!( + "prover_fri.witness_generation.blob_fetch_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), + ); + Some((block_number, job)) } None => None, } } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) { - connection_pool - .access_storage_blocking() - .witness_generator_dal() - .mark_witness_job_as_failed( - job_id, - AggregationRound::BasicCircuits, - started_at.elapsed(), - error, - self.config.max_attempts, - ); + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .access_storage() + .await + .fri_witness_generator_dal() + .mark_witness_job_failed(&error, job_id) + .await; } #[allow(clippy::async_yields_async)] async fn process_job( &self, - connection_pool: ConnectionPool, job: BasicWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle> { + let config = Arc::clone(&self.config); let object_store = Arc::clone(&self.object_store); - tokio::task::spawn_blocking(move || { - Self::process_job_sync(&*object_store, connection_pool, job, started_at) - }) + tokio::spawn(Self::process_job_impl( + object_store, + self.connection_pool.clone(), + self.prover_connection_pool.clone(), + job, + started_at, + config, + )) } async fn save_result( &self, - connection_pool: ConnectionPool, job_id: L1BatchNumber, started_at: Instant, optional_artifacts: Option, @@ -231,94 +224,208 @@ impl JobProcessor for BasicWitnessGenerator { match optional_artifacts { None => (), Some(artifacts) => { - let blob_urls = self.save_artifacts(job_id, artifacts); - update_database(connection_pool, started_at, job_id, blob_urls); + let blob_started_at = Instant::now(); + let blob_urls = save_artifacts( + job_id, + artifacts, + &*self.object_store, + &*self.public_blob_store, + ) + .await; + metrics::histogram!( + "prover_fri.witness_generation.blob_save_time", + blob_started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), + ); + update_database(&self.prover_connection_pool, started_at, job_id, blob_urls).await; } } } } -fn process_basic_circuits_job( +async fn process_basic_circuits_job( object_store: &dyn ObjectStore, - config: 
WitnessGeneratorConfig, + config: Arc, connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, job: PrepareBasicCircuitsJob, ) -> BasicCircuitArtifacts { let witness_gen_input = - build_basic_circuits_witness_generator_input(connection_pool.clone(), job, block_number); - let (basic_circuits, basic_circuits_inputs, scheduler_witness) = - generate_witness(object_store, config, connection_pool, witness_gen_input); - let circuits = basic_circuits.clone().into_flattened_set(); - + build_basic_circuits_witness_generator_input(&connection_pool, job, block_number).await; + let ( + basic_circuits, + basic_circuits_inputs, + per_circuit_closed_form_inputs, + scheduler_witness, + aux_output_witness, + ) = generate_witness(object_store, config, connection_pool, witness_gen_input).await; + metrics::histogram!( + "prover_fri.witness_generation.witness_generation_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), + ); vlog::info!( - "Witness generation for block {} is complete in {:?}. Number of circuits: {}", + "Witness generation for block {} is complete in {:?}", block_number.0, - started_at.elapsed(), - circuits.len() + started_at.elapsed() ); BasicCircuitArtifacts { basic_circuits, basic_circuits_inputs, + per_circuit_closed_form_inputs, scheduler_witness, - circuits, + aux_output_witness, } } -fn update_database( - connection_pool: ConnectionPool, +async fn update_database( + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, blob_urls: BlobUrls, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); + let mut prover_connection = prover_connection_pool.access_storage().await; - transaction.witness_generator_dal().create_aggregation_jobs( - block_number, - &blob_urls.basic_circuits_url, - &blob_urls.basic_circuits_inputs_url, - blob_urls.circuit_types_and_urls.len(), - &blob_urls.scheduler_witness_url, - ); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, - AggregationRound::BasicCircuits, - ); - transaction - .witness_generator_dal() - .mark_witness_job_as_successful( + prover_connection + .fri_prover_jobs_dal() + .insert_prover_jobs( block_number, + blob_urls.circuit_ids_and_urls, AggregationRound::BasicCircuits, - started_at.elapsed(), - ); + 0, + ) + .await; + prover_connection + .fri_witness_generator_dal() + .create_aggregation_jobs( + block_number, + &blob_urls.closed_form_inputs_and_urls, + &blob_urls.scheduler_witness_url, + get_recursive_layer_circuit_id_for_base_layer, + ) + .await; + prover_connection + .fri_witness_generator_dal() + .mark_witness_job_as_successful(block_number, started_at.elapsed()) + .await; +} - transaction.commit_blocking(); - track_witness_generation_stage(started_at, AggregationRound::BasicCircuits); +async fn get_artifacts( + block_number: L1BatchNumber, + object_store: &dyn ObjectStore, +) -> BasicWitnessGeneratorJob { + let job = object_store.get(block_number).await.unwrap(); + BasicWitnessGeneratorJob { block_number, job } +} + +async fn save_artifacts( + block_number: L1BatchNumber, + artifacts: BasicCircuitArtifacts, + object_store: &dyn ObjectStore, + public_object_store: &dyn ObjectStore, +) -> BlobUrls { + let circuit_ids_and_urls = save_base_prover_input_artifacts( + block_number, + artifacts.basic_circuits, + object_store, + AggregationRound::BasicCircuits, + ) + .await; + let closed_form_inputs_and_urls = 
save_leaf_aggregation_artifacts( + block_number, + artifacts.basic_circuits_inputs, + artifacts.per_circuit_closed_form_inputs, + object_store, + ) + .await; + let scheduler_witness_url = save_scheduler_artifacts( + block_number, + artifacts.scheduler_witness, + artifacts.aux_output_witness, + object_store, + public_object_store, + ) + .await; + + BlobUrls { + circuit_ids_and_urls, + closed_form_inputs_and_urls, + scheduler_witness_url, + } +} + +async fn save_scheduler_artifacts( + block_number: L1BatchNumber, + scheduler_partial_input: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + aux_output_witness: BlockAuxilaryOutputWitness, + object_store: &dyn ObjectStore, + public_object_store: &dyn ObjectStore, +) -> String { + let aux_output_witness_wrapper = AuxOutputWitnessWrapper(aux_output_witness); + public_object_store + .put(block_number, &aux_output_witness_wrapper) + .await + .unwrap(); + object_store + .put(block_number, &aux_output_witness_wrapper) + .await + .unwrap(); + let wrapper = SchedulerPartialInputWrapper(scheduler_partial_input); + object_store.put(block_number, &wrapper).await.unwrap() +} + +async fn save_leaf_aggregation_artifacts( + block_number: L1BatchNumber, + basic_circuits_inputs: BlockBasicCircuitsPublicInputs, + per_circuit_closed_form_inputs: BlockBasicCircuitsPublicCompactFormsWitnesses, + object_store: &dyn ObjectStore, +) -> Vec<(u8, String, usize)> { + let round_function = ZkSyncDefaultRoundFunction::default(); + let queues = basic_circuits_inputs + .into_recursion_queues(per_circuit_closed_form_inputs, &round_function); + let mut circuit_id_urls_with_count = Vec::with_capacity(queues.len()); + for (circuit_id_ref, recursion_queue_simulator, inputs) in queues { + let circuit_id = circuit_id_ref as u8; + let key = ClosedFormInputKey { + block_number, + circuit_id, + }; + let basic_circuit_count = inputs.len(); + let wrapper = ClosedFormInputWrapper(inputs, recursion_queue_simulator); + let blob_url = object_store.put(key, &wrapper).await.unwrap(); + circuit_id_urls_with_count.push((circuit_id, blob_url, basic_circuit_count)) + } + circuit_id_urls_with_count } // If making changes to this method, consider moving this logic to the DAL layer and make // `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`. 
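One orientation point before the input-building function below: `save_leaf_aggregation_artifacts` above addresses each circuit's closed-form inputs by a per-(batch, circuit) key. A sketch of one such key, with illustrative values:

```rust
// Illustrative only: the batch number and circuit id are hypothetical.
let key = ClosedFormInputKey {
    block_number: L1BatchNumber(42), // batch being proven
    circuit_id: 1,                   // a base-layer circuit id in 1..=13
};
// `object_store.put(key, &wrapper)` then returns the blob url recorded in BlobUrls.
```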
-fn build_basic_circuits_witness_generator_input( - connection_pool: ConnectionPool, +async fn build_basic_circuits_witness_generator_input( + connection_pool: &ConnectionPool, witness_merkle_input: PrepareBasicCircuitsJob, block_number: L1BatchNumber, ) -> BasicCircuitWitnessGeneratorInput { - let mut connection = connection_pool.access_storage_blocking(); + let mut connection = connection_pool.access_storage().await; let block_header = connection .blocks_dal() .get_block_header(block_number) + .await .unwrap(); let previous_block_header = connection .blocks_dal() .get_block_header(block_number - 1) + .await .unwrap(); let previous_block_hash = connection .blocks_dal() .get_block_state_root(block_number - 1) + .await .expect("cannot generate witness before the root hash is computed"); BasicCircuitWitnessGeneratorInput { block_number, @@ -331,32 +438,41 @@ fn build_basic_circuits_witness_generator_input( } } -fn generate_witness( +async fn generate_witness( object_store: &dyn ObjectStore, - config: WitnessGeneratorConfig, + config: Arc, connection_pool: ConnectionPool, input: BasicCircuitWitnessGeneratorInput, ) -> ( - BlockBasicCircuits, - BlockBasicCircuitsPublicInputs, - SchedulerCircuitInstanceWitness, + BlockBasicCircuits, + BlockBasicCircuitsPublicInputs, + BlockBasicCircuitsPublicCompactFormsWitnesses, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + BlockAuxilaryOutputWitness, ) { - let mut connection = connection_pool.access_storage_blocking(); + let mut connection = connection_pool.access_storage().await; let header = connection .blocks_dal() .get_block_header(input.block_number) + .await .unwrap(); let bootloader_code_bytes = connection .storage_dal() .get_factory_dep(header.base_system_contracts_hashes.bootloader) + .await .expect("Bootloader bytecode should exist"); let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); let account_bytecode_bytes = connection .storage_dal() .get_factory_dep(header.base_system_contracts_hashes.default_aa) + .await .expect("Default aa bytecode should exist"); let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); - let bootloader_contents = expand_bootloader_contents(input.initial_heap_content); + let bootloader_contents = expand_bootloader_contents(&input.initial_heap_content); let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa); let hashes: HashSet = input @@ -367,49 +483,44 @@ fn generate_witness( .map(|hash| u256_to_h256(*hash)) .collect(); - let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes); + let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes).await; if input.used_bytecodes_hashes.contains(&account_code_hash) { used_bytecodes.insert(account_code_hash, account_bytecode); } - let factory_dep_bytecode_hashes: HashSet = used_bytecodes - .clone() - .keys() - .map(|&hash| u256_to_h256(hash)) - .collect(); - let missing_deps: HashSet<_> = hashes - .difference(&factory_dep_bytecode_hashes) - .cloned() - .collect(); - if !missing_deps.is_empty() { - vlog::error!("{:?} factory deps are not found in DB", missing_deps); - } + + assert_eq!( + hashes.len(), + used_bytecodes.len(), + "{} factory deps are not found in DB", + hashes.len() - used_bytecodes.len() + ); // `DbStorageProvider` was designed to be used in API, so it accepts miniblock numbers. // Probably, we should make it work with L1 batch numbers too. 
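// (Sketch of the relation assumed here: an L1 batch aggregates one or more miniblocks, so the code below pins the storage snapshot to the last miniblock of batch N - 1; the VM then re-executes batch N on top of exactly the state it originally saw.)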
let (_, last_miniblock_number) = connection .blocks_dal() .get_miniblock_range_of_l1_batch(input.block_number - 1) + .await .expect("L1 batch should contain at least one miniblock"); - let db_storage_provider = DbStorageProvider::new(connection, last_miniblock_number, true); + drop(connection); + let mut tree = PrecalculatedMerklePathsProvider::new( input.merkle_paths_input, input.previous_block_hash.0, ); - - let storage_ptr: &mut dyn vm::storage::Storage = &mut StorageView::new(db_storage_provider); - let storage_oracle = StorageOracle::::new(Rc::new(RefCell::new(storage_ptr))); - let memory = SimpleMemory::::default(); + let geometry_config = get_geometry_config(); let mut hasher = DefaultHasher::new(); - GEOMETRY_CONFIG.hash(&mut hasher); + geometry_config.hash(&mut hasher); vlog::info!( "generating witness for block {} using geometry config hash: {}", input.block_number.0, hasher.finish() ); - if config + + let should_dump_arguments = config .dump_arguments_for_blocks - .contains(&input.block_number.0) - { + .contains(&input.block_number.0); + if should_dump_arguments { save_run_with_fixed_params_args_to_gcs( object_store, input.block_number.0, @@ -423,30 +534,41 @@ fn generate_witness( used_bytecodes.clone(), Vec::default(), MAX_CYCLES_FOR_TX as usize, - GEOMETRY_CONFIG, + geometry_config, tree.clone(), - ); + ) + .await; } - zksync_types::zkevm_test_harness::external_calls::run_with_fixed_params( - Address::zero(), - BOOTLOADER_ADDRESS, - bootloader_code, - bootloader_contents, - false, - account_code_hash, - used_bytecodes, - Vec::default(), - MAX_CYCLES_FOR_TX as usize, - GEOMETRY_CONFIG, - storage_oracle, - memory, - &mut tree, - ) + // The following part is CPU-heavy, so we move it to a separate thread. + let rt_handle = tokio::runtime::Handle::current(); + tokio::task::spawn_blocking(move || { + let connection = rt_handle.block_on(connection_pool.access_storage()); + let storage = PostgresStorage::new(rt_handle, connection, last_miniblock_number, true); + let storage_view = &mut StorageView::new(storage); + let storage_oracle: StorageOracle = + StorageOracle::new(storage_view.as_ptr()); + zkevm_test_harness::external_calls::run_with_fixed_params( + Address::zero(), + BOOTLOADER_ADDRESS, + bootloader_code, + bootloader_contents, + false, + account_code_hash, + used_bytecodes, + Vec::default(), + MAX_CYCLES_FOR_TX as usize, + geometry_config, + storage_oracle, + &mut tree, + ) + }) + .await + .unwrap() } #[allow(clippy::too_many_arguments)] -fn save_run_with_fixed_params_args_to_gcs( +async fn save_run_with_fixed_params_args_to_gcs( object_store: &dyn ObjectStore, l1_batch_number: u32, last_miniblock_number: u32, @@ -479,24 +601,25 @@ fn save_run_with_fixed_params_args_to_gcs( }; object_store .put(L1BatchNumber(l1_batch_number), &run_with_fixed_params_input) + .await .unwrap(); } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -struct RunWithFixedParamsInput { - l1_batch_number: u32, - last_miniblock_number: u32, - caller: Address, - entry_point_address: Address, - entry_point_code: Vec<[u8; 32]>, - initial_heap_content: Vec, - zk_porter_is_available: bool, - default_aa_code_hash: U256, - used_bytecodes: HashMap>, - ram_verification_queries: Vec<(u32, U256)>, - cycle_limit: usize, - geometry: GeometryConfig, - tree: PrecalculatedMerklePathsProvider, +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct RunWithFixedParamsInput { + pub l1_batch_number: u32, + pub last_miniblock_number: u32, + pub caller: Address, + pub entry_point_address: Address, + 
pub entry_point_code: Vec<[u8; 32]>, + pub initial_heap_content: Vec, + pub zk_porter_is_available: bool, + pub default_aa_code_hash: U256, + pub used_bytecodes: HashMap>, + pub ram_verification_queries: Vec<(u32, U256)>, + pub cycle_limit: usize, + pub geometry: GeometryConfig, + pub tree: PrecalculatedMerklePathsProvider, } impl StoredObject for RunWithFixedParamsInput { diff --git a/core/bin/witness_generator/src/leaf_aggregation.rs b/core/bin/witness_generator/src/leaf_aggregation.rs index 8d4ca0391909..7818c326dff7 100644 --- a/core/bin/witness_generator/src/leaf_aggregation.rs +++ b/core/bin/witness_generator/src/leaf_aggregation.rs @@ -1,58 +1,78 @@ -use std::collections::HashMap; +use zkevm_test_harness::witness::recursive_aggregation::{ + compute_leaf_params, create_leaf_witnesses, +}; + use std::time::Instant; use async_trait::async_trait; +use circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, ZkSyncBaseLayerVerificationKey, +}; +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; +use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; +use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; +use zksync_vk_setup_data_server_fri::{ + get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type, +}; -use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; -use zksync_config::configs::WitnessGeneratorConfig; +use crate::utils::{ + get_recursive_layer_circuit_id_for_base_layer, load_proofs_for_job_ids, + save_node_aggregations_artifacts, save_recursive_layer_prover_input_artifacts, + ClosedFormInputWrapper, FriProofWrapper, +}; +use zkevm_test_harness::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; +use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::{ClosedFormInputKey, ObjectStore, ObjectStoreFactory}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - circuit::LEAF_SPLITTING_FACTOR, - proofs::{AggregationRound, PrepareLeafAggregationCircuitsJob, WitnessGeneratorJobMetadata}, - zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, - bellman::plonk::better_better_cs::setup::VerificationKey, - encodings::recursion_request::RecursionRequest, encodings::QueueSimulator, witness, - witness::oracle::VmWitnessOracle, LeafAggregationOutputDataWitness, - }, - L1BatchNumber, -}; -use zksync_verification_key_server::{ - get_ordered_vks_for_basic_circuits, get_vks_for_basic_circuits, get_vks_for_commitment, -}; +use zksync_types::proofs::{AggregationRound, LeafAggregationJobMetadata}; +use zksync_types::L1BatchNumber; pub struct LeafAggregationArtifacts { - leaf_layer_subqueues: Vec, 2, 2>>, - aggregation_outputs: Vec>, - leaf_circuits: Vec>>, + circuit_id: u8, + block_number: L1BatchNumber, + aggregations: Vec<( + u64, + RecursionQueueSimulator, + ZkSyncRecursiveLayerCircuit, + )>, + #[allow(dead_code)] + closed_form_inputs: Vec>, } #[derive(Debug)] struct BlobUrls { - leaf_layer_subqueues_url: String, - aggregation_outputs_url: String, - circuit_types_and_urls: Vec<(&'static str, String)>, + circuit_ids_and_urls: Vec<(u8, String)>, + aggregations_urls: String, } -#[derive(Clone)] pub struct LeafAggregationWitnessGeneratorJob { + circuit_id: u8, block_number: L1BatchNumber, - job: 
PrepareLeafAggregationCircuitsJob, + closed_form_inputs: ClosedFormInputWrapper, + proofs: Vec, + base_vk: ZkSyncBaseLayerVerificationKey, + leaf_params: RecursionLeafParametersWitness, } #[derive(Debug)] pub struct LeafAggregationWitnessGenerator { - config: WitnessGeneratorConfig, + #[allow(dead_code)] + config: FriWitnessGeneratorConfig, object_store: Box, + prover_connection_pool: ConnectionPool, } impl LeafAggregationWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + config: FriWitnessGeneratorConfig, + store_factory: &ObjectStoreFactory, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { config, - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, + prover_connection_pool, } } @@ -60,113 +80,49 @@ impl LeafAggregationWitnessGenerator { leaf_job: LeafAggregationWitnessGeneratorJob, started_at: Instant, ) -> LeafAggregationArtifacts { - let LeafAggregationWitnessGeneratorJob { block_number, job } = leaf_job; - vlog::info!( - "Starting witness generation of type {:?} for block {}", + "Starting witness generation of type {:?} for block {} with circuit {}", AggregationRound::LeafAggregation, - block_number.0 + leaf_job.block_number.0, + leaf_job.circuit_id, ); - process_leaf_aggregation_job(started_at, block_number, job) - } - - fn get_artifacts( - &self, - metadata: WitnessGeneratorJobMetadata, - ) -> LeafAggregationWitnessGeneratorJob { - let basic_circuits = self.object_store.get(metadata.block_number).unwrap(); - let basic_circuits_inputs = self.object_store.get(metadata.block_number).unwrap(); - - LeafAggregationWitnessGeneratorJob { - block_number: metadata.block_number, - job: PrepareLeafAggregationCircuitsJob { - basic_circuits_inputs, - basic_circuits_proofs: metadata.proofs, - basic_circuits, - }, - } - } - - fn save_artifacts( - &self, - block_number: L1BatchNumber, - artifacts: LeafAggregationArtifacts, - ) -> BlobUrls { - let leaf_layer_subqueues_url = self - .object_store - .put(block_number, &artifacts.leaf_layer_subqueues) - .unwrap(); - let aggregation_outputs_url = self - .object_store - .put(block_number, &artifacts.aggregation_outputs) - .unwrap(); - let circuit_types_and_urls = save_prover_input_artifacts( - block_number, - &artifacts.leaf_circuits, - &*self.object_store, - AggregationRound::LeafAggregation, - ); - BlobUrls { - leaf_layer_subqueues_url, - aggregation_outputs_url, - circuit_types_and_urls, - } + process_leaf_aggregation_job(started_at, leaf_job) } } #[async_trait] impl JobProcessor for LeafAggregationWitnessGenerator { type Job = LeafAggregationWitnessGeneratorJob; - type JobId = L1BatchNumber; + type JobId = u32; type JobArtifacts = LeafAggregationArtifacts; - const SERVICE_NAME: &'static str = "leaf_aggregation_witness_generator"; - - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - - match connection - .witness_generator_dal() - .get_next_leaf_aggregation_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ) { - Some(metadata) => { - let job = self.get_artifacts(metadata); - Some((job.block_number, job)) - } - None => None, - } + const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; + + async fn get_next_job(&self) -> 
Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; + let metadata = prover_connection + .fri_witness_generator_dal() + .get_next_leaf_aggregation_job() + .await?; + vlog::info!("Processing leaf aggregation job {:?}", metadata.id); + Some(( + metadata.id, + prepare_leaf_aggregation_job(metadata, &*self.object_store).await, + )) } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) { - connection_pool - .access_storage_blocking() - .witness_generator_dal() - .mark_witness_job_as_failed( - job_id, - AggregationRound::LeafAggregation, - started_at.elapsed(), - error, - self.config.max_attempts, - ); + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .access_storage() + .await + .fri_witness_generator_dal() + .mark_leaf_aggregation_job_failed(&error, job_id) + .await; } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: LeafAggregationWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle { @@ -175,117 +131,181 @@ impl JobProcessor for LeafAggregationWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, + job_id: u32, started_at: Instant, artifacts: LeafAggregationArtifacts, ) { - let leaf_circuits_len = artifacts.leaf_circuits.len(); - let blob_urls = self.save_artifacts(job_id, artifacts); + let block_number = artifacts.block_number; + let circuit_id = artifacts.circuit_id; + let blob_urls = save_artifacts(artifacts, &*self.object_store).await; update_database( - connection_pool, + &self.prover_connection_pool, started_at, + block_number, job_id, - leaf_circuits_len, blob_urls, - ); + circuit_id, + ) + .await; } } -fn process_leaf_aggregation_job( +async fn prepare_leaf_aggregation_job( + metadata: LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, +) -> LeafAggregationWitnessGeneratorJob { + let started_at = Instant::now(); + let closed_form_input = get_artifacts(&metadata, object_store).await; + let proofs = load_proofs_for_job_ids(&metadata.prover_job_ids_for_proofs, object_store).await; + metrics::histogram!( + "prover_fri.witness_generation.blob_fetch_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), + ); + let started_at = Instant::now(); + let base_vk = get_base_layer_vk_for_circuit_type(metadata.circuit_id); + // This is a temporary solution to unblock shadow proving. + // We should have a method that converts a basic circuit id to a leaf circuit id, since they are different.
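+ // (The `+ 2` below encodes that mapping for now: in this circuit layout the first two recursive-layer ids appear to be reserved for the scheduler and node circuits, so a base circuit id maps to its leaf circuit id at an offset of two; this is the same assumption `get_recursive_layer_circuit_id_for_base_layer` captures elsewhere in this patch.)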
+ let leaf_vk = get_recursive_layer_vk_for_circuit_type(metadata.circuit_id + 2); + let base_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(base_proof) => base_proof, + FriProofWrapper::Recursive(_) => { + panic!("Expected only base proofs for leaf agg {}", metadata.id) + } + }) + .collect::>(); + let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); + metrics::histogram!( + "prover_fri.witness_generation.prepare_job_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), + ); + LeafAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + closed_form_inputs: closed_form_input, + proofs: base_proofs, + base_vk, + leaf_params, + } +} + +pub fn process_leaf_aggregation_job( started_at: Instant, - block_number: L1BatchNumber, - job: PrepareLeafAggregationCircuitsJob, + job: LeafAggregationWitnessGeneratorJob, ) -> LeafAggregationArtifacts { - let stage_started_at = Instant::now(); - - let verification_keys: HashMap< - u8, - VerificationKey>>, - > = get_vks_for_basic_circuits(); - - vlog::info!( - "Verification keys loaded in {:?}", - stage_started_at.elapsed() + let circuit_id = job.circuit_id; + let subsets = ( + circuit_id as u64, + job.closed_form_inputs.1, + job.closed_form_inputs.0, ); - - // we need the list of vks that matches the list of job.basic_circuit_proofs - let vks_for_aggregation: Vec< - VerificationKey>>, - > = get_ordered_vks_for_basic_circuits(&job.basic_circuits, &verification_keys); - - let (all_vk_committments, set_committment, g2_points) = - witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( - verification_keys, - )); - - vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); - - let stage_started_at = Instant::now(); - - let (leaf_layer_subqueues, aggregation_outputs, leaf_circuits) = - witness::recursive_aggregation::prepare_leaf_aggregations( - job.basic_circuits, - job.basic_circuits_inputs, - job.basic_circuits_proofs, - vks_for_aggregation, - LEAF_SPLITTING_FACTOR, - all_vk_committments, - set_committment, - g2_points, - ); - - vlog::info!( - "prepare_leaf_aggregations took {:?}", - stage_started_at.elapsed() + let leaf_params = (circuit_id, job.leaf_params); + let (aggregations, closed_form_inputs) = + create_leaf_witnesses(subsets, job.proofs, job.base_vk, leaf_params); + metrics::histogram!( + "prover_fri.witness_generation.witness_generation_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), ); vlog::info!( - "Leaf witness generation for block {} is complete in {:?}. 
Number of circuits: {}", - block_number.0, + "Leaf witness generation for block {} with circuit id {} is complete in {:?}.", + job.block_number.0, + circuit_id, started_at.elapsed(), - leaf_circuits.len() ); LeafAggregationArtifacts { - leaf_layer_subqueues, - aggregation_outputs, - leaf_circuits, + circuit_id, + block_number: job.block_number, + aggregations, + closed_form_inputs, } } -fn update_database( - connection_pool: ConnectionPool, +async fn update_database( + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, - leaf_circuits_len: usize, + job_id: u32, blob_urls: BlobUrls, + circuit_id: u8, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); - - // inserts artifacts into the node_aggregation_witness_jobs table - // and advances it to waiting_for_proofs status + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; + let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); transaction - .witness_generator_dal() - .save_leaf_aggregation_artifacts( + .fri_prover_jobs_dal() + .insert_prover_jobs( block_number, - leaf_circuits_len, - &blob_urls.leaf_layer_subqueues_url, - &blob_urls.aggregation_outputs_url, - ); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, - AggregationRound::LeafAggregation, - ); + blob_urls.circuit_ids_and_urls, + AggregationRound::LeafAggregation, + 0, + ) + .await; transaction - .witness_generator_dal() - .mark_witness_job_as_successful( + .fri_witness_generator_dal() + .update_node_aggregation_jobs_url( block_number, - AggregationRound::LeafAggregation, - started_at.elapsed(), - ); + get_recursive_layer_circuit_id_for_base_layer(circuit_id), + number_of_dependent_jobs, + 0, + blob_urls.aggregations_urls, + ) + .await; + transaction + .fri_witness_generator_dal() + .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) + .await; - transaction.commit_blocking(); - track_witness_generation_stage(started_at, AggregationRound::LeafAggregation); + transaction.commit().await; +} + +async fn get_artifacts( + metadata: &LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, +) -> ClosedFormInputWrapper { + let key = ClosedFormInputKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + }; + object_store + .get(key) + .await + .unwrap_or_else(|_| panic!("leaf aggregation job artifacts missing: {:?}", key)) +} + +async fn save_artifacts( + artifacts: LeafAggregationArtifacts, + object_store: &dyn ObjectStore, +) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + 0, + artifacts.aggregations.clone(), + object_store, + ) + .await; + let circuit_ids_and_urls = save_recursive_layer_prover_input_artifacts( + artifacts.block_number, + artifacts.aggregations, + AggregationRound::LeafAggregation, + 0, + object_store, + None, + ) + .await; + metrics::histogram!( + "prover_fri.witness_generation.blob_save_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), + ); + BlobUrls { + circuit_ids_and_urls, + aggregations_urls, + } } diff --git a/core/bin/witness_generator/src/main.rs b/core/bin/witness_generator/src/main.rs index a6a6622be4a5..03eea4251f8a 100644 ---
a/core/bin/witness_generator/src/main.rs +++ b/core/bin/witness_generator/src/main.rs @@ -1,25 +1,28 @@ -use std::time::Instant; +#![feature(generic_const_exprs)] -use futures::StreamExt; use prometheus_exporter::run_prometheus_exporter; -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_config::ZkSyncConfig; -use zksync_dal::ConnectionPool; +use std::time::Instant; +use structopt::StructOpt; +use tokio::sync::watch; +use zksync_config::configs::{AlertsConfig, FriWitnessGeneratorConfig, PrometheusConfig}; +use zksync_config::ObjectStoreConfig; +use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_object_store::ObjectStoreFactory; -use zksync_prover_utils::{get_stop_signal_receiver, wait_for_tasks}; +use zksync_prover_utils::get_stop_signal_receiver; use zksync_queued_job_processor::JobProcessor; use zksync_types::proofs::AggregationRound; +use zksync_types::web3::futures::StreamExt; +use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::basic_circuits::BasicWitnessGenerator; use crate::leaf_aggregation::LeafAggregationWitnessGenerator; use crate::node_aggregation::NodeAggregationWitnessGenerator; use crate::scheduler::SchedulerWitnessGenerator; -use structopt::StructOpt; mod basic_circuits; mod leaf_aggregation; mod node_aggregation; -mod precalculated; +mod precalculated_merkle_paths_provider; mod scheduler; mod utils; @@ -39,11 +42,17 @@ struct Opt { #[tokio::main] async fn main() { + vlog::init(); + let sentry_guard = vlog::init_sentry(); + match sentry_guard { + Some(_) => vlog::info!( + "Starting Sentry with url: {}", + std::env::var("MISC_SENTRY_URL").unwrap(), + ), + None => vlog::info!("No Sentry url configured"), + } + let opt = Opt::from_args(); - let _sentry_guard = vlog::init(); - let connection_pool = ConnectionPool::new(None, true); - let zksync_config = ZkSyncConfig::from_env(); - let (stop_sender, stop_receiver) = tokio::sync::watch::channel::<bool>(false); let started_at = Instant::now(); vlog::info!( "initializing the {:?} witness generator, batch size: {:?}", @@ -52,28 +61,60 @@ async fn main() { ); let use_push_gateway = opt.batch_size.is_some(); - let config = WitnessGeneratorConfig::from_env(); let store_factory = ObjectStoreFactory::from_env(); + let config = FriWitnessGeneratorConfig::from_env(); + let prometheus_config = PrometheusConfig::from_env(); + let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; + let prover_connection_pool = ConnectionPool::new(None, DbVariant::Prover).await; + let (stop_sender, stop_receiver) = watch::channel(false); + let witness_generator_task = match opt.round { AggregationRound::BasicCircuits => { - let generator = BasicWitnessGenerator::new(config, &store_factory); - generator.run(connection_pool, stop_receiver, opt.batch_size) + let public_blob_store = ObjectStoreFactory::new(ObjectStoreConfig::public_from_env()) + .create_store() + .await; + let generator = BasicWitnessGenerator::new( + config, + &store_factory, + public_blob_store, + connection_pool, + prover_connection_pool, + ) + .await; + generator.run(stop_receiver, opt.batch_size) } AggregationRound::LeafAggregation => { - let generator = LeafAggregationWitnessGenerator::new(config, &store_factory); - generator.run(connection_pool, stop_receiver, opt.batch_size) + let generator = LeafAggregationWitnessGenerator::new( + config, + &store_factory, + prover_connection_pool, + ) + .await; + generator.run(stop_receiver, opt.batch_size) } AggregationRound::NodeAggregation => { - let generator =
NodeAggregationWitnessGenerator::new(config, &store_factory); - generator.run(connection_pool, stop_receiver, opt.batch_size) + let generator = + NodeAggregationWitnessGenerator::new(&store_factory, prover_connection_pool).await; + generator.run(stop_receiver, opt.batch_size) } AggregationRound::Scheduler => { - let generator = SchedulerWitnessGenerator::new(config, &store_factory); - generator.run(connection_pool, stop_receiver, opt.batch_size) + let generator = + SchedulerWitnessGenerator::new(&store_factory, prover_connection_pool).await; + generator.run(stop_receiver, opt.batch_size) } }; - - let witness_generator_task = tokio::spawn(witness_generator_task); + let tasks = vec![ + run_prometheus_exporter( + prometheus_config.listener_port, + use_push_gateway.then(|| { + ( + prometheus_config.pushgateway_url.clone(), + prometheus_config.push_interval(), + ) + }), + ), + tokio::spawn(witness_generator_task), + ]; vlog::info!( "initialized {:?} witness generator in {:?}", opt.round, @@ -82,19 +123,20 @@ async fn main() { metrics::gauge!( "server.init.latency", started_at.elapsed(), - "stage" => format!("witness_generator_{:?}", opt.round) + "stage" => format!("fri_witness_generator_{:?}", opt.round) ); - let tasks = vec![ - run_prometheus_exporter(zksync_config.api.prometheus, use_push_gateway), - witness_generator_task, - ]; let mut stop_signal_receiver = get_stop_signal_receiver(); + let particular_crypto_alerts = Some(AlertsConfig::from_env().sporadic_crypto_errors_substrs); + let graceful_shutdown = None::>; + let tasks_allowed_to_finish = false; tokio::select! { - _ = wait_for_tasks(tasks) => {}, + _ = wait_for_tasks(tasks, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = stop_signal_receiver.next() => { vlog::info!("Stop signal received, shutting down"); - }, + } } - let _ = stop_sender.send(true); + + stop_sender.send(true).ok(); + vlog::info!("Finished witness generation"); } diff --git a/core/bin/witness_generator/src/node_aggregation.rs b/core/bin/witness_generator/src/node_aggregation.rs index 3179db434943..c71f70c51f22 100644 --- a/core/bin/witness_generator/src/node_aggregation.rs +++ b/core/bin/witness_generator/src/node_aggregation.rs @@ -1,124 +1,122 @@ -use std::collections::HashMap; -use std::env; use std::time::Instant; use async_trait::async_trait; +use circuit_definitions::boojum::field::goldilocks::GoldilocksField; +use circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, +}; +use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; -use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - circuit::{ - LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, - }, - proofs::{AggregationRound, PrepareNodeAggregationCircuitJob, WitnessGeneratorJobMetadata}, - zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::bn256::Bn256, - bellman::plonk::better_better_cs::setup::VerificationKey, - ff::to_hex, - witness::{ - self, - oracle::VmWitnessOracle, - recursive_aggregation::{erase_vk_type, padding_aggregations}, - }, - NodeAggregationOutputDataWitness, - }, - L1BatchNumber, +use 
zkevm_test_harness::witness::recursive_aggregation::{ + compute_node_vk_commitment, create_node_witnesses, }; -use zksync_verification_key_server::{ - get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, +use zkevm_test_harness::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; +use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; +use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; + +use crate::utils::{ + load_proofs_for_job_ids, save_node_aggregations_artifacts, + save_recursive_layer_prover_input_artifacts, AggregationWrapper, FriProofWrapper, }; +use zksync_dal::ConnectionPool; +use zksync_object_store::{AggregationsKey, ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::proofs::NodeAggregationJobMetadata; +use zksync_types::{proofs::AggregationRound, L1BatchNumber}; pub struct NodeAggregationArtifacts { - final_node_aggregation: NodeAggregationOutputDataWitness, - node_circuits: Vec>>, + circuit_id: u8, + block_number: L1BatchNumber, + depth: u16, + next_aggregations: Vec<( + u64, + RecursionQueueSimulator, + ZkSyncRecursiveLayerCircuit, + )>, } #[derive(Debug)] struct BlobUrls { node_aggregations_url: String, - circuit_types_and_urls: Vec<(&'static str, String)>, + circuit_ids_and_urls: Vec<(u8, String)>, } #[derive(Clone)] pub struct NodeAggregationWitnessGeneratorJob { + circuit_id: u8, block_number: L1BatchNumber, - job: PrepareNodeAggregationCircuitJob, + depth: u16, + aggregations: Vec<( + u64, + RecursionQueueSimulator, + ZkSyncRecursiveLayerCircuit, + )>, + proofs: Vec, + leaf_vk: ZkSyncRecursionLayerVerificationKey, + node_vk: ZkSyncRecursionLayerVerificationKey, + all_leafs_layer_params: Vec<(u8, RecursionLeafParametersWitness)>, } #[derive(Debug)] pub struct NodeAggregationWitnessGenerator { - config: WitnessGeneratorConfig, object_store: Box, + prover_connection_pool: ConnectionPool, } impl NodeAggregationWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + store_factory: &ObjectStoreFactory, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { - config, - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, + prover_connection_pool, } } fn process_job_sync( - node_job: NodeAggregationWitnessGeneratorJob, + job: NodeAggregationWitnessGeneratorJob, started_at: Instant, ) -> NodeAggregationArtifacts { - let config: WitnessGeneratorConfig = WitnessGeneratorConfig::from_env(); - let NodeAggregationWitnessGeneratorJob { block_number, job } = node_job; - + let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone()); vlog::info!( - "Starting witness generation of type {:?} for block {}", + "Starting witness generation of type {:?} for block {} circuit id {} depth {}", AggregationRound::NodeAggregation, - block_number.0 + job.block_number.0, + job.circuit_id, + job.depth ); - process_node_aggregation_job(config, started_at, block_number, job) - } - - fn get_artifacts( - &self, - metadata: WitnessGeneratorJobMetadata, - ) -> NodeAggregationWitnessGeneratorJob { - let leaf_layer_subqueues = self - .object_store - .get(metadata.block_number) - .expect("leaf_layer_subqueues not found in queued `node_aggregation_witness_jobs` job"); - let aggregation_outputs = self - .object_store - .get(metadata.block_number) - .expect("aggregation_outputs not found in queued `node_aggregation_witness_jobs` job"); - - 
NodeAggregationWitnessGeneratorJob { - block_number: metadata.block_number, - job: PrepareNodeAggregationCircuitJob { - previous_level_proofs: metadata.proofs, - previous_level_leafs_aggregations: aggregation_outputs, - previous_sequence: leaf_layer_subqueues, - }, - } - } - - fn save_artifacts( - &self, - block_number: L1BatchNumber, - artifacts: NodeAggregationArtifacts, - ) -> BlobUrls { - let node_aggregations_url = self - .object_store - .put(block_number, &artifacts.final_node_aggregation) - .unwrap(); - let circuit_types_and_urls = save_prover_input_artifacts( - block_number, - &artifacts.node_circuits, - &*self.object_store, - AggregationRound::NodeAggregation, + let vk = match job.depth { + 0 => job.leaf_vk, + _ => job.node_vk, + }; + let next_aggregations = create_node_witnesses( + job.aggregations, + job.proofs, + vk, + node_vk_commitment, + &job.all_leafs_layer_params, + ); + metrics::histogram!( + "prover_fri.witness_generation.witness_generation_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), ); - BlobUrls { - node_aggregations_url, - circuit_types_and_urls, + vlog::info!( + "Node witness generation for block {} with circuit id {} at depth {} with {} next_aggregations jobs completed in {:?}.", + job.block_number.0, + job.circuit_id, + job.depth, + next_aggregations.len(), + started_at.elapsed(), + ); + + NodeAggregationArtifacts { + circuit_id: job.circuit_id, + block_number: job.block_number, + depth: job.depth + 1, + next_aggregations, } } } @@ -126,56 +124,36 @@ impl NodeAggregationWitnessGenerator { #[async_trait] impl JobProcessor for NodeAggregationWitnessGenerator { type Job = NodeAggregationWitnessGeneratorJob; - type JobId = L1BatchNumber; + type JobId = u32; type JobArtifacts = NodeAggregationArtifacts; - const SERVICE_NAME: &'static str = "node_aggregation_witness_generator"; - - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - - match connection - .witness_generator_dal() - .get_next_node_aggregation_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ) { - Some(metadata) => { - let job = self.get_artifacts(metadata); - return Some((job.block_number, job)); - } - None => None, - } + const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; + + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; + let metadata = prover_connection + .fri_witness_generator_dal() + .get_next_node_aggregation_job() + .await?; + vlog::info!("Processing node aggregation job {:?}", metadata.id); + Some(( + metadata.id, + prepare_job(metadata, &*self.object_store).await, + )) } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) { - connection_pool - .access_storage_blocking() - .witness_generator_dal() - .mark_witness_job_as_failed( - job_id, - AggregationRound::NodeAggregation, - started_at.elapsed(), - error, - self.config.max_attempts, - ); + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .access_storage() + .await + .fri_witness_generator_dal() + .mark_node_aggregation_job_failed(&error, job_id) + 
.await; } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: NodeAggregationWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle { @@ -184,150 +162,182 @@ impl JobProcessor for NodeAggregationWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, + job_id: u32, started_at: Instant, artifacts: NodeAggregationArtifacts, ) { - let blob_urls = self.save_artifacts(job_id, artifacts); - update_database(connection_pool, started_at, job_id, blob_urls); + let block_number = artifacts.block_number; + let circuit_id = artifacts.circuit_id; + let depth = artifacts.depth; + let shall_continue_node_aggregations = artifacts.next_aggregations.len() > 1; + let blob_urls = save_artifacts(artifacts, &*self.object_store).await; + update_database( + &self.prover_connection_pool, + started_at, + job_id, + block_number, + depth, + circuit_id, + blob_urls, + shall_continue_node_aggregations, + ) + .await; } } -pub fn process_node_aggregation_job( - config: WitnessGeneratorConfig, - started_at: Instant, - block_number: L1BatchNumber, - job: PrepareNodeAggregationCircuitJob, -) -> NodeAggregationArtifacts { - let stage_started_at = Instant::now(); - zksync_prover_utils::ensure_initial_setup_keys_present( - &config.initial_setup_key_path, - &config.key_download_url, - ); - env::set_var("CRS_FILE", config.initial_setup_key_path); - vlog::info!("Keys loaded in {:?}", stage_started_at.elapsed()); - let stage_started_at = Instant::now(); - - let verification_keys: HashMap< - u8, - VerificationKey>>, - > = get_vks_for_basic_circuits(); - - let padding_aggregations = padding_aggregations(NODE_SPLITTING_FACTOR); - - let (_, set_committment, g2_points) = - witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( - verification_keys, - )); - - let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); - - let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); - - let (_, leaf_aggregation_vk_committment) = - witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - leaf_aggregation_vk.clone(), - )); - - let (_, node_aggregation_vk_committment) = - witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - node_aggregation_vk, - )); - - vlog::info!( - "commitments: basic set: {:?}, leaf: {:?}, node: {:?}", - to_hex(&set_committment), - to_hex(&leaf_aggregation_vk_committment), - to_hex(&node_aggregation_vk_committment) - ); - vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); - - // fs::write("previous_level_proofs.bincode", bincode::serialize(&job.previous_level_proofs).unwrap()).unwrap(); - // fs::write("leaf_aggregation_vk.bincode", bincode::serialize(&leaf_aggregation_vk).unwrap()).unwrap(); - // fs::write("previous_level_leafs_aggregations.bincode", bincode::serialize(&job.previous_level_leafs_aggregations).unwrap()).unwrap(); - // fs::write("previous_sequence.bincode", bincode::serialize(&job.previous_sequence).unwrap()).unwrap(); - // fs::write("padding_aggregations.bincode", bincode::serialize(&padding_aggregations).unwrap()).unwrap(); - // fs::write("set_committment.bincode", bincode::serialize(&set_committment).unwrap()).unwrap(); - // fs::write("node_aggregation_vk_committment.bincode", bincode::serialize(&node_aggregation_vk_committment).unwrap()).unwrap(); - // fs::write("leaf_aggregation_vk_committment.bincode", 
bincode::serialize(&leaf_aggregation_vk_committment).unwrap()).unwrap(); - // fs::write("g2_points.bincode", bincode::serialize(&g2_points).unwrap()).unwrap(); - - let stage_started_at = Instant::now(); - let (_, final_node_aggregations, node_circuits) = - zksync_types::zkevm_test_harness::witness::recursive_aggregation::prepare_node_aggregations( - job.previous_level_proofs, - leaf_aggregation_vk, - true, - 0, - job.previous_level_leafs_aggregations, - Vec::default(), - job.previous_sequence, - LEAF_SPLITTING_FACTOR, - NODE_SPLITTING_FACTOR, - padding_aggregations, - set_committment, - node_aggregation_vk_committment, - leaf_aggregation_vk_committment, - g2_points, - ); - - vlog::info!( - "prepare_node_aggregations took {:?}", - stage_started_at.elapsed() - ); - - assert_eq!( - node_circuits.len(), - 1, - "prepare_node_aggregations returned more than one circuit" +async fn prepare_job( + metadata: NodeAggregationJobMetadata, + object_store: &dyn ObjectStore, +) -> NodeAggregationWitnessGeneratorJob { + let started_at = Instant::now(); + let artifacts = get_artifacts(&metadata, object_store).await; + let proofs = load_proofs_for_job_ids(&metadata.prover_job_ids_for_proofs, object_store).await; + metrics::histogram!( + "prover_fri.witness_generation.blob_fetch_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), ); - assert_eq!( - final_node_aggregations.len(), - 1, - "prepare_node_aggregations returned more than one node aggregation" + let started_at = Instant::now(); + let leaf_vk = get_recursive_layer_vk_for_circuit_type(metadata.circuit_id); + let node_vk = get_recursive_layer_vk_for_circuit_type( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, ); - vlog::info!( - "Node witness generation for block {} is complete in {:?}. 
Number of circuits: {}", - block_number.0, - started_at.elapsed(), - node_circuits.len() + let recursive_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(_) => { + panic!( + "Expected only recursive proofs for node agg {}", + metadata.id + ) + } + FriProofWrapper::Recursive(recursive_proof) => recursive_proof, + }) + .collect::>(); + + metrics::histogram!( + "prover_fri.witness_generation.job_preparation_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), ); - - NodeAggregationArtifacts { - final_node_aggregation: final_node_aggregations.into_iter().next().unwrap(), - node_circuits, + NodeAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + depth: metadata.depth, + aggregations: artifacts.0, + proofs: recursive_proofs, + leaf_vk, + node_vk, + all_leafs_layer_params: get_leaf_vk_params(), } } -fn update_database( - connection_pool: ConnectionPool, +#[allow(clippy::too_many_arguments)] +async fn update_database( + prover_connection_pool: &ConnectionPool, started_at: Instant, + id: u32, block_number: L1BatchNumber, + depth: u16, + circuit_id: u8, blob_urls: BlobUrls, + shall_continue_node_aggregations: bool, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; + let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); + match shall_continue_node_aggregations { + true => { + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + block_number, + blob_urls.circuit_ids_and_urls, + AggregationRound::NodeAggregation, + depth, + ) + .await; + transaction + .fri_witness_generator_dal() + .insert_node_aggregation_jobs( + block_number, + circuit_id, + Some(dependent_jobs as i32), + depth, + &blob_urls.node_aggregations_url, + ) + .await; + } + false => { + let (_, blob_url) = blob_urls.circuit_ids_and_urls[0].clone(); + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + block_number, + circuit_id, + depth, + 0, + AggregationRound::NodeAggregation, + &blob_url, + true, + ) + .await + } + } - // inserts artifacts into the scheduler_witness_jobs table - // and advances it to waiting_for_proofs status transaction - .witness_generator_dal() - .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, + .fri_witness_generator_dal() + .mark_node_aggregation_as_successful(id, started_at.elapsed()) + .await; + + transaction.commit().await; +} + +async fn get_artifacts( + metadata: &NodeAggregationJobMetadata, + object_store: &dyn ObjectStore, +) -> AggregationWrapper { + let key = AggregationsKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + depth: metadata.depth, + }; + object_store + .get(key) + .await + .unwrap_or_else(|_| panic!("node aggregation job artifacts missing: {:?}", key)) +} + +async fn save_artifacts( + artifacts: NodeAggregationArtifacts, + object_store: &dyn ObjectStore, +) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + artifacts.circuit_id, + artifacts.depth, + artifacts.next_aggregations.clone(), + object_store, + ) + .await; + let circuit_ids_and_urls = 
save_recursive_layer_prover_input_artifacts( + artifacts.block_number, + artifacts.next_aggregations, AggregationRound::NodeAggregation, + artifacts.depth, + object_store, + Some(artifacts.circuit_id), + ) + .await; + metrics::histogram!( + "prover_fri.witness_generation.blob_save_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), ); - transaction - .witness_generator_dal() - .mark_witness_job_as_successful( - block_number, - AggregationRound::NodeAggregation, - started_at.elapsed(), - ); - - transaction.commit_blocking(); - track_witness_generation_stage(started_at, AggregationRound::NodeAggregation); + BlobUrls { + node_aggregations_url: aggregations_urls, + circuit_ids_and_urls, + } } diff --git a/core/bin/witness_generator/src/precalculated/mod.rs b/core/bin/witness_generator/src/precalculated_merkle_paths_provider.rs similarity index 79% rename from core/bin/witness_generator/src/precalculated/mod.rs rename to core/bin/witness_generator/src/precalculated_merkle_paths_provider.rs index cc3cc24376fb..56c56b157cb4 100644 --- a/core/bin/witness_generator/src/precalculated/mod.rs +++ b/core/bin/witness_generator/src/precalculated_merkle_paths_provider.rs @@ -1,40 +1,30 @@ use serde::{Deserialize, Serialize}; -use std::collections::VecDeque; +use zkevm_test_harness::witness::tree::{BinaryHasher, EnumeratedBinaryLeaf, LeafQuery}; use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; -use zksync_types::zkevm_test_harness::blake2::Blake2s256; -use zksync_types::zkevm_test_harness::witness::tree::BinaryHasher; -use zksync_types::zkevm_test_harness::witness::tree::{ - BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, -}; -#[cfg(test)] -mod tests; +use zk_evm::blake2::Blake2s256; +use zkevm_test_harness::witness::tree::BinarySparseStorageTree; +use zkevm_test_harness::witness::tree::ZkSyncStorageLeaf; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct PrecalculatedMerklePathsProvider { // We keep the root hash of the last processed leaf, as it is needed by the witness generator. - root_hash: [u8; 32], - // The list of expected leaves to be interacted with. - pending_leaves: VecDeque<StorageLogMetadata>, - // The index that would be assigned to the next new leaf. - next_enumeration_index: u64, + pub root_hash: [u8; 32], + // The ordered list of expected leaves to be interacted with + pub pending_leaves: Vec<StorageLogMetadata>, + // The index that would be assigned to the next new leaf + pub next_enumeration_index: u64, // For every Storage Write Log we expect two invocations: `get_leaf` and `insert_leaf`. // We set this flag to `true` after the initial `get_leaf` is invoked. - is_get_leaf_invoked: bool, + pub is_get_leaf_invoked: bool, } impl PrecalculatedMerklePathsProvider { pub fn new(input: PrepareBasicCircuitsJob, root_hash: [u8; 32]) -> Self { let next_enumeration_index = input.next_enumeration_index(); - vlog::debug!( - "Initializing PrecalculatedMerklePathsProvider. Initial root_hash: {:?}, \ - initial next_enumeration_index: {:?}", - root_hash, - next_enumeration_index - ); - + vlog::debug!("Initializing PrecalculatedMerklePathsProvider.
Initial root_hash: {:?}, initial next_enumeration_index: {:?}", root_hash, next_enumeration_index); Self { root_hash, pending_leaves: input.into_merkle_paths().collect(), @@ -76,7 +66,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> !self.is_get_leaf_invoked, "`get_leaf()` invoked more than once or get_leaf is invoked when insert_leaf was expected" ); - let next = self.pending_leaves.front().unwrap_or_else(|| { + let next = self.pending_leaves.first().unwrap_or_else(|| { panic!( "invoked `get_leaf({:?})` with empty `pending_leaves`", index @@ -108,7 +98,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> } } else { // If it is a read, the next invocation will relate to the next `pending_leaf` - self.pending_leaves.pop_front(); + self.pending_leaves.remove(0); }; res @@ -130,7 +120,7 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> self.is_get_leaf_invoked, "`get_leaf()` is expected to be invoked before `insert_leaf()`" ); - let next = self.pending_leaves.pop_front().unwrap(); + let next = self.pending_leaves.remove(0); self.root_hash = next.root_hash; assert!( @@ -190,11 +180,15 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> ); let mut first_writes = vec![]; let mut updates = vec![]; - - let mut write_pending_leaves = self.pending_leaves.iter().filter(|log| log.is_write); - let write_pending_leaves = &mut write_pending_leaves; - for ((pending_leaf, idx), mut leaf) in - write_pending_leaves.zip(&mut indexes).zip(&mut leafs) + let write_pending_leaves = self + .pending_leaves + .iter() + .filter(|&l| l.is_write) + .collect::>(); + let write_pending_leaves_iter = write_pending_leaves.iter(); + let mut length = 0; + for (&pending_leaf, (idx, mut leaf)) in + write_pending_leaves_iter.zip((&mut indexes).zip(&mut leafs)) { leaf.set_index(pending_leaf.leaf_enumeration_index); if pending_leaf.first_write { @@ -202,25 +196,24 @@ first_writes.push(leaf); } else { updates.push(leaf); } + length += 1; } - - let length = first_writes.len() + updates.len(); - assert!( - write_pending_leaves.next().is_none(), + assert_eq!( + length, + write_pending_leaves.len(), "pending leaves: len({}) must be of same length as leafs and indexes: len({})", - write_pending_leaves.count() + 1 + length, - // ^ 1 is added because of `next()` getting called in the assertion condition + write_pending_leaves.len(), length ); assert!( indexes.next().is_none(), "indexes must be of same length as leafs and pending leaves: len({})", - length + write_pending_leaves.len() ); assert!( leafs.next().is_none(), "leafs must be of same length as indexes and pending leaves: len({})", - length + write_pending_leaves.len() ); (self.next_enumeration_index, first_writes, updates) } @@ -236,27 +229,33 @@ impl BinarySparseStorageTree<256, 32, 32, 8, 32, Blake2s256, ZkSyncStorageLeaf> root ); - let mut leaf_bytes = [0_u8; 8 + 32]; + let mut leaf_bytes = vec![0u8; 8 + 32]; // can make a scratch space somewhere later on leaf_bytes[8..].copy_from_slice(query.leaf.value()); + let leaf_index_bytes = query.leaf.current_index().to_be_bytes(); leaf_bytes[0..8].copy_from_slice(&leaf_index_bytes); + let leaf_hash = Blake2s256::leaf_hash(&leaf_bytes); let mut current_hash = leaf_hash; for level in 0..256 { - let (lhs, rhs) = if is_right_side_node(&query.index, level) { + let (l, r) = if is_right_side_node(&query.index, level) { (&query.merkle_path[level], &current_hash) }
else { (¤t_hash, &query.merkle_path[level]) }; - current_hash = Blake2s256::node_hash(level, lhs, rhs); + + let this_level_hash = Blake2s256::node_hash(level, l, r); + + current_hash = this_level_hash; } - *root == current_hash + + root == ¤t_hash } } -fn is_right_side_node(index: &[u8], depth: usize) -> bool { - debug_assert!(depth < index.len() * 8); +fn is_right_side_node(index: &[u8; N], depth: usize) -> bool { + debug_assert!(depth < N * 8); let byte_idx = depth / 8; let bit_idx = depth % 8; diff --git a/core/bin/witness_generator/src/scheduler.rs b/core/bin/witness_generator/src/scheduler.rs index 46a40c7a3d37..152179a6a5d3 100644 --- a/core/bin/witness_generator/src/scheduler.rs +++ b/core/bin/witness_generator/src/scheduler.rs @@ -1,103 +1,100 @@ -use std::collections::HashMap; -use std::slice; +use std::convert::TryInto; + use std::time::Instant; use async_trait::async_trait; - -use crate::utils::{save_prover_input_artifacts, track_witness_generation_stage}; -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_dal::ConnectionPool; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - circuit::{ - LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, NODE_SPLITTING_FACTOR, - }, - proofs::{AggregationRound, PrepareSchedulerCircuitJob, WitnessGeneratorJobMetadata}, - zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, - sync_vm::scheduler::BlockApplicationWitness, - witness::{self, oracle::VmWitnessOracle, recursive_aggregation::erase_vk_type}, - }, - L1BatchNumber, +use circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; +use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; +use circuit_definitions::circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, ZkSyncRecursionProof, + ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, }; -use zksync_verification_key_server::{ - get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment, +use circuit_definitions::recursion_layer_proof_config; +use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; +use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; +use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; + +use crate::utils::{ + load_proofs_for_job_ids, CircuitWrapper, FriProofWrapper, SchedulerPartialInputWrapper, }; +use zksync_dal::ConnectionPool; +use zksync_object_store::{FriCircuitKey, ObjectStore, ObjectStoreFactory}; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::proofs::AggregationRound; +use zksync_types::L1BatchNumber; pub struct SchedulerArtifacts { - final_aggregation_result: BlockApplicationWitness, - scheduler_circuit: ZkSyncCircuit>, + scheduler_circuit: ZkSyncRecursiveLayerCircuit, } #[derive(Clone)] pub struct SchedulerWitnessGeneratorJob { block_number: L1BatchNumber, - job: PrepareSchedulerCircuitJob, + scheduler_witness: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + node_vk: ZkSyncRecursionLayerVerificationKey, } #[derive(Debug)] pub struct 
SchedulerWitnessGenerator { - config: WitnessGeneratorConfig, object_store: Box, + prover_connection_pool: ConnectionPool, } impl SchedulerWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + store_factory: &ObjectStoreFactory, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { - config, - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, + prover_connection_pool, } } fn process_job_sync( - scheduler_job: SchedulerWitnessGeneratorJob, + job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> SchedulerArtifacts { - let SchedulerWitnessGeneratorJob { block_number, job } = scheduler_job; - vlog::info!( - "Starting witness generation of type {:?} for block {}", + "Starting fri witness generation of type {:?} for block {}", AggregationRound::Scheduler, - block_number.0 + job.block_number.0 + ); + let config = SchedulerConfig { + proof_config: recursion_layer_proof_config(), + vk_fixed_parameters: job.node_vk.into_inner().fixed_parameters, + capacity: SCHEDULER_CAPACITY, + _marker: std::marker::PhantomData, + }; + + let scheduler_circuit = SchedulerCircuit { + witness: job.scheduler_witness, + config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + metrics::histogram!( + "prover_fri.witness_generation.witness_generation_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), ); - process_scheduler_job(started_at, block_number, job) - } - fn get_artifacts( - &self, - metadata: WitnessGeneratorJobMetadata, - previous_aux_hash: [u8; 32], - previous_meta_hash: [u8; 32], - ) -> SchedulerWitnessGeneratorJob { - let scheduler_witness = self.object_store.get(metadata.block_number).unwrap(); - let final_node_aggregations = self.object_store.get(metadata.block_number).unwrap(); + vlog::info!( + "Scheduler generation for block {} is complete in {:?}", + job.block_number.0, + started_at.elapsed() + ); - SchedulerWitnessGeneratorJob { - block_number: metadata.block_number, - job: PrepareSchedulerCircuitJob { - incomplete_scheduler_witness: scheduler_witness, - final_node_aggregations, - node_final_proof_level_proof: metadata.proofs.into_iter().next().unwrap(), - previous_aux_hash, - previous_meta_hash, - }, + SchedulerArtifacts { + scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit), } } - - fn save_artifacts( - &self, - block_number: L1BatchNumber, - scheduler_circuit: &ZkSyncCircuit>, - ) -> Vec<(&'static str, String)> { - save_prover_input_artifacts( - block_number, - slice::from_ref(scheduler_circuit), - &*self.object_store, - AggregationRound::Scheduler, - ) - } } #[async_trait] @@ -106,61 +103,56 @@ impl JobProcessor for SchedulerWitnessGenerator { type JobId = L1BatchNumber; type JobArtifacts = SchedulerArtifacts; - const SERVICE_NAME: &'static str = "scheduler_witness_generator"; - - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - - match connection - .witness_generator_dal() - .get_next_scheduler_witness_job( - self.config.witness_generation_timeout(), - self.config.max_attempts, - last_l1_batch_to_process, - ) { - Some(metadata) => { - let prev_metadata = connection - .blocks_dal() - .get_block_metadata(metadata.block_number - 1); - let previous_aux_hash = 
prev_metadata - .as_ref() - .map_or([0u8; 32], |e| e.metadata.aux_data_hash.0); - let previous_meta_hash = - prev_metadata.map_or([0u8; 32], |e| e.metadata.meta_parameters_hash.0); - let job = self.get_artifacts(metadata, previous_aux_hash, previous_meta_hash); - Some((job.block_number, job)) - } - None => None, - } + const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; + + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; + + let l1_batch_number = prover_connection + .fri_witness_generator_dal() + .get_next_scheduler_witness_job() + .await?; + let proof_job_ids = prover_connection + .fri_scheduler_dependency_tracker_dal() + .get_final_prover_job_ids_for(l1_batch_number) + .await; + let started_at = Instant::now(); + let proofs = load_proofs_for_job_ids(&proof_job_ids, &*self.object_store).await; + metrics::histogram!( + "prover_fri.witness_generation.blob_fetch_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), + ); + let recursive_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(_) => { + panic!( + "Expected only recursive proofs for scheduler l1 batch {}", + l1_batch_number + ) + } + FriProofWrapper::Recursive(recursive_proof) => recursive_proof.into_inner(), + }) + .collect::<Vec<_>>(); + Some(( + l1_batch_number, + prepare_job(l1_batch_number, recursive_proofs, &*self.object_store).await, + )) } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) { - connection_pool - .access_storage_blocking() - .witness_generator_dal() - .mark_witness_job_as_failed( - job_id, - AggregationRound::Scheduler, - started_at.elapsed(), - error, - self.config.max_attempts, - ); + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .access_storage() + .await + .fri_witness_generator_dal() + .mark_scheduler_job_failed(&error, job_id) + .await; } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle<SchedulerArtifacts> { @@ -169,148 +161,85 @@ impl JobProcessor for SchedulerWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, job_id: L1BatchNumber, started_at: Instant, artifacts: SchedulerArtifacts, ) { - let circuit_types_and_urls = self.save_artifacts(job_id, &artifacts.scheduler_circuit); - update_database( - connection_pool, - started_at, - job_id, - artifacts.final_aggregation_result, - circuit_types_and_urls, - ); - } -} - -pub fn process_scheduler_job( - started_at: Instant, - block_number: L1BatchNumber, - job: PrepareSchedulerCircuitJob, -) -> SchedulerArtifacts { - let stage_started_at = Instant::now(); - - let verification_keys: HashMap< - u8, - VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>, - > = get_vks_for_basic_circuits(); - - let (_, set_committment, g2_points) = - witness::recursive_aggregation::form_base_circuits_committment(get_vks_for_commitment( - verification_keys, - )); - - vlog::info!( - "Verification keys loaded in {:?}", - stage_started_at.elapsed() - ); - - let leaf_aggregation_vk = get_vk_for_circuit_type(LEAF_CIRCUIT_INDEX); - - let node_aggregation_vk = get_vk_for_circuit_type(NODE_CIRCUIT_INDEX); - - let (_, leaf_aggregation_vk_committment) =
witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - leaf_aggregation_vk, - )); - - let (_, node_aggregation_vk_committment) = - witness::recursive_aggregation::compute_vk_encoding_and_committment(erase_vk_type( - node_aggregation_vk.clone(), - )); - - vlog::info!("Commitments generated in {:?}", stage_started_at.elapsed()); - let stage_started_at = Instant::now(); - - let (scheduler_circuit, final_aggregation_result) = - witness::recursive_aggregation::prepare_scheduler_circuit( - job.incomplete_scheduler_witness, - job.node_final_proof_level_proof, - node_aggregation_vk, - job.final_node_aggregations, - set_committment, - node_aggregation_vk_committment, - leaf_aggregation_vk_committment, - job.previous_aux_hash, - job.previous_meta_hash, - (LEAF_SPLITTING_FACTOR * NODE_SPLITTING_FACTOR) as u32, - g2_points, + let key = FriCircuitKey { + block_number: job_id, + circuit_id: 1, + sequence_number: 0, + depth: 0, + aggregation_round: AggregationRound::Scheduler, + }; + let blob_save_started_at = Instant::now(); + let scheduler_circuit_blob_url = self + .object_store + .put(key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit)) + .await + .unwrap(); + metrics::histogram!( + "prover_fri.witness_generation.blob_save_time", + blob_save_started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), ); - vlog::info!( - "prepare_scheduler_circuit took {:?}", - stage_started_at.elapsed() - ); + let mut prover_connection = self.prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + job_id, + 1, + 0, + 0, + AggregationRound::Scheduler, + &scheduler_circuit_blob_url, + false, + ) + .await; - vlog::info!( - "Scheduler generation for block {} is complete in {:?}", - block_number.0, - started_at.elapsed() - ); + transaction + .fri_witness_generator_dal() + .mark_scheduler_job_as_successful(job_id, started_at.elapsed()) + .await; - SchedulerArtifacts { - final_aggregation_result, - scheduler_circuit, + transaction.commit().await; } } -pub fn update_database( - connection_pool: ConnectionPool, - started_at: Instant, - block_number: L1BatchNumber, - final_aggregation_result: BlockApplicationWitness, - circuit_types_and_urls: Vec<(&'static str, String)>, -) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); - let block = transaction - .blocks_dal() - .get_block_metadata(block_number) - .expect("L1 batch should exist"); - - assert_eq!( - block.metadata.aux_data_hash.0, final_aggregation_result.aux_data_hash, - "Commitment for aux data is wrong" - ); - - assert_eq!( - block.metadata.pass_through_data_hash.0, final_aggregation_result.passthrough_data_hash, - "Commitment for pass through data is wrong" - ); - - assert_eq!( - block.metadata.meta_parameters_hash.0, final_aggregation_result.meta_data_hash, - "Commitment for metadata is wrong" - ); - - assert_eq!( - block.metadata.commitment.0, final_aggregation_result.block_header_hash, - "Commitment is wrong" +async fn prepare_job( + l1_batch_number: L1BatchNumber, + proofs: Vec<ZkSyncRecursionProof>, + object_store: &dyn ObjectStore, +) -> SchedulerWitnessGeneratorJob { + let started_at = Instant::now(); + let node_vk = get_recursive_layer_vk_for_circuit_type( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, ); - - transaction.prover_dal().insert_prover_jobs( - block_number, - circuit_types_and_urls, -
AggregationRound::Scheduler, + let SchedulerPartialInputWrapper(mut scheduler_witness) = + object_store.get(l1_batch_number).await.unwrap(); + scheduler_witness.node_layer_vk_witness = node_vk.clone().into_inner(); + + scheduler_witness.proof_witnesses = proofs.into(); + + let leaf_vk_commits = get_leaf_vk_params(); + let leaf_layer_params = leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + scheduler_witness.leaf_layer_parameters = leaf_layer_params; + metrics::histogram!( + "prover_fri.witness_generation.prepare_job_time", + started_at.elapsed(), + "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), ); - transaction - .witness_generator_dal() - .save_final_aggregation_result( - block_number, - final_aggregation_result.aggregation_result_coords, - ); - - transaction - .witness_generator_dal() - .mark_witness_job_as_successful( - block_number, - AggregationRound::Scheduler, - started_at.elapsed(), - ); - - transaction.commit_blocking(); - track_witness_generation_stage(started_at, AggregationRound::Scheduler); + SchedulerWitnessGeneratorJob { + block_number: l1_batch_number, + scheduler_witness, + node_vk, + } } diff --git a/core/bin/witness_generator/src/utils.rs b/core/bin/witness_generator/src/utils.rs index 5c8b3f4f14bf..f06e9eca4bb0 100644 --- a/core/bin/witness_generator/src/utils.rs +++ b/core/bin/witness_generator/src/utils.rs @@ -1,18 +1,31 @@ -use std::time::Instant; -use vm::zk_evm::ethereum_types::U256; -use zksync_config::configs::WitnessGeneratorConfig; -use zksync_object_store::{CircuitKey, ObjectStore}; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zksync_types::USED_BOOTLOADER_MEMORY_BYTES; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; +use circuit_definitions::boojum::field::goldilocks::GoldilocksExt2; +use circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; +use circuit_definitions::circuit_definitions::base_layer::{ + ZkSyncBaseLayerCircuit, ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, +}; +use circuit_definitions::circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerProof, + ZkSyncRecursionLayerStorageType, ZkSyncRecursiveLayerCircuit, +}; -trait WitnessGenerator { - fn new(config: WitnessGeneratorConfig) -> Self; -} +use circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; +use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; +use circuit_definitions::ZkSyncDefaultRoundFunction; +use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; +use zkevm_test_harness::witness::full_block_artifact::BlockBasicCircuits; + +use zkevm_test_harness::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; +use zksync_config::constants::USED_BOOTLOADER_MEMORY_BYTES; +use zksync_object_store::{ + serialize_using_bincode, AggregationsKey, Bucket, ClosedFormInputKey, FriCircuitKey, + ObjectStore, StoredObject, +}; +use zksync_types::proofs::AggregationRound; +use zksync_types::{L1BatchNumber, U256}; -pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>)
-> Vec<u8> { +pub fn expand_bootloader_contents(packed: &[(usize, U256)]) -> Vec<u8> { let mut result: Vec<u8> = Vec::new(); result.resize(USED_BOOTLOADER_MEMORY_BYTES, 0); @@ -23,39 +36,230 @@ pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec<u8> { result.to_vec() } -pub fn save_prover_input_artifacts( +#[derive(serde::Serialize, serde::Deserialize)] +pub enum CircuitWrapper { + Base( + ZkSyncBaseLayerCircuit< + GoldilocksField, + VmWitnessOracle<GoldilocksField>, + ZkSyncDefaultRoundFunction, + >, + ), + Recursive(ZkSyncRecursiveLayerCircuit), +} + +impl StoredObject for CircuitWrapper { + const BUCKET: Bucket = Bucket::ProverJobsFri; + type Key<'a> = FriCircuitKey; + + fn encode_key(key: Self::Key<'_>) -> String { + let FriCircuitKey { + block_number, + sequence_number, + circuit_id, + aggregation_round, + depth, + } = key; + format!("{block_number}_{sequence_number}_{circuit_id}_{aggregation_round:?}_{depth}.bin") + } + + serialize_using_bincode!(); +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct ClosedFormInputWrapper( + pub(crate) Vec<ZkSyncBaseLayerClosedFormInput<GoldilocksField>>, + pub(crate) RecursionQueueSimulator<GoldilocksField>, ); + +impl StoredObject for ClosedFormInputWrapper { + const BUCKET: Bucket = Bucket::LeafAggregationWitnessJobsFri; + type Key<'a> = ClosedFormInputKey; + + fn encode_key(key: Self::Key<'_>) -> String { + let ClosedFormInputKey { + block_number, + circuit_id, + } = key; + format!("closed_form_inputs_{block_number}_{circuit_id}.bin") + } + + serialize_using_bincode!(); +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct AggregationWrapper( + pub Vec<( + u64, + RecursionQueueSimulator<GoldilocksField>, + ZkSyncRecursiveLayerCircuit, + )>, +); + +impl StoredObject for AggregationWrapper { + const BUCKET: Bucket = Bucket::NodeAggregationWitnessJobsFri; + type Key<'a> = AggregationsKey; + + fn encode_key(key: Self::Key<'_>) -> String { + let AggregationsKey { + block_number, + circuit_id, + depth, + } = key; + format!("aggregations_{block_number}_{circuit_id}_{depth}.bin") + } + + serialize_using_bincode!(); +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct SchedulerPartialInputWrapper( + pub SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, +); + +impl StoredObject for SchedulerPartialInputWrapper { + const BUCKET: Bucket = Bucket::SchedulerWitnessJobsFri; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("scheduler_witness_{key}.bin") + } + + serialize_using_bincode!(); +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub enum FriProofWrapper { + Base(ZkSyncBaseLayerProof), + Recursive(ZkSyncRecursionLayerProof), +} + +impl StoredObject for FriProofWrapper { + const BUCKET: Bucket = Bucket::ProofsFri; + type Key<'a> = u32; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("proof_{key}.bin") + } + + serialize_using_bincode!(); +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct AuxOutputWitnessWrapper(pub BlockAuxilaryOutputWitness<GoldilocksField>); + +impl StoredObject for AuxOutputWitnessWrapper { + const BUCKET: Bucket = Bucket::SchedulerWitnessJobsFri; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("aux_output_witness_{key}.bin") + } + + serialize_using_bincode!(); +} + +pub async fn save_base_prover_input_artifacts( block_number: L1BatchNumber, - circuits: &[ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>], + circuits: BlockBasicCircuits<GoldilocksField>, object_store: &dyn ObjectStore, aggregation_round: AggregationRound, -) -> Vec<(&'static str, String)> { - let types_and_urls =
circuits - .iter() - .enumerate() - .map(|(sequence_number, circuit)| { - let circuit_type = circuit.short_description(); - let circuit_key = CircuitKey { - block_number, - sequence_number, - circuit_type, - aggregation_round, - }; - let blob_url = object_store.put(circuit_key, circuit).unwrap(); - (circuit_type, blob_url) - }); - types_and_urls.collect() -} - -pub fn track_witness_generation_stage(started_at: Instant, round: AggregationRound) { - let stage = match round { - AggregationRound::BasicCircuits => "basic_circuits", - AggregationRound::LeafAggregation => "leaf_aggregation", - AggregationRound::NodeAggregation => "node_aggregation", - AggregationRound::Scheduler => "scheduler", +) -> Vec<(u8, String)> { + let circuits = circuits.into_flattened_set(); + let mut ids_and_urls = Vec::with_capacity(circuits.len()); + for (sequence_number, circuit) in circuits.into_iter().enumerate() { + let circuit_id = circuit.numeric_circuit_type(); + let circuit_key = FriCircuitKey { + block_number, + sequence_number, + circuit_id, + aggregation_round, + depth: 0, + }; + let blob_url = object_store + .put(circuit_key, &CircuitWrapper::Base(circuit)) + .await + .unwrap(); + ids_and_urls.push((circuit_id, blob_url)); + } + ids_and_urls +} + +pub async fn save_recursive_layer_prover_input_artifacts( + block_number: L1BatchNumber, + aggregations: Vec<( + u64, + RecursionQueueSimulator<GoldilocksField>, + ZkSyncRecursiveLayerCircuit, + )>, + aggregation_round: AggregationRound, + depth: u16, + object_store: &dyn ObjectStore, + base_layer_circuit_id: Option<u8>, +) -> Vec<(u8, String)> { + let mut ids_and_urls = Vec::with_capacity(aggregations.len()); + for (sequence_number, (_, _, circuit)) in aggregations.into_iter().enumerate() { + let circuit_id = base_layer_circuit_id.unwrap_or_else(|| circuit.numeric_circuit_type()); + let circuit_key = FriCircuitKey { + block_number, + sequence_number, + circuit_id, + aggregation_round, + depth, + }; + let blob_url = object_store + .put(circuit_key, &CircuitWrapper::Recursive(circuit)) + .await + .unwrap(); + ids_and_urls.push((circuit_id, blob_url)); + } + ids_and_urls +} + +pub async fn save_node_aggregations_artifacts( + block_number: L1BatchNumber, + circuit_id: u8, + depth: u16, + aggregations: Vec<( + u64, + RecursionQueueSimulator<GoldilocksField>, + ZkSyncRecursiveLayerCircuit, + )>, + object_store: &dyn ObjectStore, +) -> String { + let key = AggregationsKey { + block_number, + circuit_id, + depth, }; - metrics::histogram!( - "server.witness_generator.processing_time", - started_at.elapsed(), - "stage" => format!("wit_gen_{}", stage) + object_store + .put(key, &AggregationWrapper(aggregations)) + .await + .unwrap() +} + +pub fn get_recursive_layer_circuit_id_for_base_layer(base_layer_circuit_id: u8) -> u8 { + let recursive_circuit_type = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(base_layer_circuit_id), ); + recursive_circuit_type as u8 +} + +pub fn get_base_layer_circuit_id_for_recursive_layer(recursive_layer_circuit_id: u8) -> u8 { + recursive_layer_circuit_id - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8 +} + +pub async fn load_proofs_for_job_ids( + job_ids: &[u32], + object_store: &dyn ObjectStore, +) -> Vec<FriProofWrapper> { + let mut proofs = Vec::with_capacity(job_ids.len()); + for &job_id in job_ids { + proofs.push(object_store.get(job_id).await.unwrap()); + } + proofs } diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index dcd6b5903ac6..4b4e12961046 100644 --- a/core/bin/zksync_core/Cargo.toml +++
b/core/bin/zksync_core/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] publish = false # We don't want to publish our binaries. @@ -26,7 +26,6 @@ zksync_circuit_breaker = { path = "../../lib/circuit_breaker", version = "1.0" } vm = { path = "../../lib/vm", version = "0.1.0" } zksync_storage = { path = "../../lib/storage", version = "1.0" } zksync_merkle_tree = { path = "../../lib/merkle_tree", version = "1.0" } -zksync_merkle_tree2 = { path = "../../lib/merkle_tree2", version = "1.0" } zksync_mini_merkle_tree = { path = "../../lib/mini_merkle_tree", version = "1.0" } zksync_verification_key_generator_and_server = { path = "../verification_key_generator_and_server", version = "1.0" } prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } @@ -36,16 +35,13 @@ zksync_web3_decl = { path = "../../lib/web3_decl", version = "1.0", default-feat zksync_object_store = { path = "../../lib/object_store", version = "1.0" } zksync_health_check = { path = "../../lib/health_check", version = "0.1.0" } vlog = { path = "../../lib/vlog", version = "1.0" } -zksync_db_storage_provider = { path = "../../lib/db_storage_provider", version = "1.0" } clap = { version = "4.2.4", features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" metrics = "0.20" itertools = "0.10.3" -once_cell = "1.7" ctrlc = { version = "3.1", features = ["termination"] } -bincode = "1" rand = "0.8" tokio = { version = "1", features = ["time"] } @@ -68,17 +64,22 @@ bigdecimal = { version = "=0.2.0", features = ["serde"] } reqwest = { version = "0.11", features = ["blocking", "json"] } hex = "0.4" governor = "0.4.2" -tempfile = "3.0.2" +hyper = "0.14.26" +tower-http = { version = "0.4.1", features = ["full"] } +tower = { version = "0.4.13", features = ["full"] } actix-rt = "2.2.0" actix-cors = "0.6.0-beta.2" actix-web = "4.0.0-beta.8" -tracing = { version= "0.1.26" } +tracing = "0.1.26" [dev-dependencies] db_test_macro = { path = "../../lib/db_test_macro", version = "0.1.0" } + assert_matches = "1.5" +once_cell = "1.7" +tempfile = "3.0.2" [features] openzeppelin_tests = [] diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox.rs b/core/bin/zksync_core/src/api_server/execution_sandbox.rs deleted file mode 100644 index fe5d3e53e464..000000000000 --- a/core/bin/zksync_core/src/api_server/execution_sandbox.rs +++ /dev/null @@ -1,815 +0,0 @@ -use std::collections::{HashMap, HashSet}; -use std::time::Instant; - -use thiserror::Error; -use tracing::{span, Level}; - -use vm::oracles::tracer::{ValidationError, ValidationTracerParams}; -use vm::vm_with_bootloader::{ - derive_base_fee_and_gas_per_pubdata, init_vm, push_transaction_to_bootloader_memory, - BlockContext, BlockContextMode, BootloaderJobType, DerivedBlockContext, TxExecutionMode, -}; -use vm::zk_evm::block_properties::BlockProperties; -use vm::{ - storage::Storage, utils::ETH_CALL_GAS_LIMIT, TxRevertReason, VmExecutionResult, VmInstance, -}; -use vm::{HistoryDisabled, HistoryMode}; -use zksync_config::constants::ZKPORTER_IS_AVAILABLE; -use zksync_contracts::BaseSystemContracts; -use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_db_storage_provider::DbStorageProvider; -use zksync_state::storage_view::StorageView; -use zksync_types::api::BlockId; -use 
zksync_types::utils::storage_key_for_eth_balance; -use zksync_types::{ - api, - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - fee::TransactionExecutionMetrics, - get_nonce_key, - l2::L2Tx, - storage_writes_deduplicator::StorageWritesDeduplicator, - utils::{decompose_full_nonce, nonces_to_full_nonce}, - AccountTreeId, MiniblockNumber, Nonce, StorageKey, Transaction, H256, U256, -}; -use zksync_types::{PUBLISH_BYTECODE_OVERHEAD, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; -use zksync_utils::bytecode::{bytecode_len_in_bytes, hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::time::millis_since_epoch; -use zksync_utils::{h256_to_u256, u256_to_h256}; -use zksync_web3_decl::error::Web3Error; - -use crate::api_server::web3::backend_jsonrpc::error::internal_error; - -use super::tx_sender::SubmitTxError; - -#[derive(Debug, Error)] -pub enum SandboxExecutionError { - #[error("Account validation failed: {0}")] - AccountValidationFailed(String), - #[error("Failed to charge fee: {0}")] - FailedToChargeFee(String), - #[error("Paymaster validation failed: {0}")] - PaymasterValidationFailed(String), - #[error("Pre-paymaster preparation failed: {0}")] - PrePaymasterPreparationFailed(String), - #[error("From is not an account")] - FromIsNotAnAccount, - #[error("Bootloader failure: {0}")] - BootloaderFailure(String), - #[error("Revert: {0}")] - Revert(String, Vec), - #[error("Failed to pay for the transaction: {0}")] - FailedToPayForTransaction(String), - #[error("Bootloader-based tx failed")] - InnerTxError, - #[error( - "Virtual machine entered unexpected state. Please contact developers and provide transaction details \ - that caused this error. Error description: {0}" - )] - UnexpectedVMBehavior(String), - #[error("Transaction is unexecutable. Reason: {0}")] - Unexecutable(String), -} - -#[allow(clippy::too_many_arguments)] -pub fn execute_tx_eth_call( - connection_pool: &ConnectionPool, - mut tx: L2Tx, - block_id: api::BlockId, - l1_gas_price: u64, - fair_l2_gas_price: u64, - enforced_base_fee: Option, - base_system_contract: &BaseSystemContracts, - vm_execution_cache_misses_limit: Option, - trace_call: bool, -) -> Result { - let mut storage = connection_pool.access_storage_blocking(); - let resolved_block_number = storage - .blocks_web3_dal() - .resolve_block_id(block_id) - .map_err(|err| internal_error("eth_call", err))??; - let block_timestamp_s = storage - .blocks_web3_dal() - .get_block_timestamp(resolved_block_number) - .unwrap(); - - // Protection against infinite-loop eth_calls and alike: - // limiting the amount of gas the call can use. - // We can't use BLOCK_ERGS_LIMIT here since the VM itself has some overhead. 
- tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); - let vm_result = execute_tx_in_sandbox( - storage, - tx.into(), - TxExecutionMode::EthCall { - missed_storage_invocation_limit: vm_execution_cache_misses_limit.unwrap_or(usize::MAX), - }, - AccountTreeId::default(), - block_id, - resolved_block_number, - block_timestamp_s, - None, - U256::zero(), - BootloaderJobType::TransactionExecution, - l1_gas_price, - fair_l2_gas_price, - enforced_base_fee, - base_system_contract, - trace_call, - &mut Default::default(), - ) - .1 - .map_err(|err| { - let submit_tx_error: SubmitTxError = err.into(); - Web3Error::SubmitTransactionError(submit_tx_error.to_string(), submit_tx_error.data()) - })?; - Ok(vm_result) -} - -fn get_pending_state( - connection_pool: &ConnectionPool, -) -> (BlockId, StorageProcessor<'_>, MiniblockNumber) { - let block_id = api::BlockId::Number(api::BlockNumber::Pending); - let mut connection = connection_pool.access_storage_blocking(); - let resolved_block_number = connection - .blocks_web3_dal() - .resolve_block_id(block_id) - .unwrap() - .expect("Pending block should be present"); - - (block_id, connection, resolved_block_number) -} - -#[tracing::instrument(skip( - connection_pool, - tx, - operator_account, - enforced_nonce, - base_system_contracts, - storage_read_cache -))] -#[allow(clippy::too_many_arguments)] -pub fn execute_tx_with_pending_state( - connection_pool: &ConnectionPool, - tx: Transaction, - operator_account: AccountTreeId, - execution_mode: TxExecutionMode, - enforced_nonce: Option, - added_balance: U256, - l1_gas_price: u64, - fair_l2_gas_price: u64, - enforced_base_fee: Option, - base_system_contracts: &BaseSystemContracts, - storage_read_cache: &mut HashMap, -) -> ( - TransactionExecutionMetrics, - Result, -) { - let (block_id, connection, resolved_block_number) = get_pending_state(connection_pool); - - // In order for execution to pass smoothlessly, we need to ensure that block's required gasPerPubdata will be - // <= to the one in the transaction itself. 
- let l1_gas_price = adjust_l1_gas_price_for_tx( - l1_gas_price, - fair_l2_gas_price, - tx.gas_per_pubdata_byte_limit(), - ); - - execute_tx_in_sandbox( - connection, - tx, - execution_mode, - operator_account, - block_id, - resolved_block_number, - None, - enforced_nonce, - added_balance, - BootloaderJobType::TransactionExecution, - l1_gas_price, - fair_l2_gas_price, - enforced_base_fee, - base_system_contracts, - false, - storage_read_cache, - ) -} - -// Returns the number of the pubdata that the transaction will spend on factory deps -pub fn get_pubdata_for_factory_deps( - connection_pool: &ConnectionPool, - factory_deps: &Option>>, -) -> u32 { - let (_, connection, block_number) = get_pending_state(connection_pool); - let db_storage_provider = DbStorageProvider::new(connection, block_number, false); - let mut storage_view = StorageView::new(db_storage_provider); - - factory_deps - .as_ref() - .map(|deps| { - deps.iter() - .filter_map(|bytecode| { - if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) { - return None; - } - - let length = if let Ok(compressed) = - CompressedBytecodeInfo::from_original(bytecode.clone()) - { - compressed.compressed.len() - } else { - bytecode.len() - }; - - Some(length as u32 + PUBLISH_BYTECODE_OVERHEAD) - }) - .sum() - }) - .unwrap_or_default() -} - -#[allow(clippy::too_many_arguments)] -pub fn validate_tx_with_pending_state( - connection_pool: &ConnectionPool, - tx: L2Tx, - operator_account: AccountTreeId, - execution_mode: TxExecutionMode, - enforced_nonce: Option, - added_balance: U256, - l1_gas_price: u64, - fair_l2_gas_price: u64, - enforced_base_fee: Option, - base_system_contracts: &BaseSystemContracts, - computational_gas_limit: u32, -) -> Result<(), ValidationError> { - let (block_id, connection, resolved_block_number) = get_pending_state(connection_pool); - - // In order for validation to pass smoothlessly, we need to ensure that block's required gasPerPubdata will be - // <= to the one in the transaction itself. 
- let l1_gas_price = adjust_l1_gas_price_for_tx( - l1_gas_price, - fair_l2_gas_price, - tx.common_data.fee.gas_per_pubdata_limit, - ); - - validate_tx_in_sandbox( - connection, - tx, - execution_mode, - operator_account, - base_system_contracts, - block_id, - resolved_block_number, - None, - enforced_nonce, - added_balance, - l1_gas_price, - fair_l2_gas_price, - enforced_base_fee, - computational_gas_limit, - ) -} - -pub(crate) fn adjust_l1_gas_price_for_tx( - l1_gas_price: u64, - fair_l2_gas_price: u64, - tx_gas_per_pubdata_limit: U256, -) -> u64 { - let current_pubdata_price = - derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price).1; - if U256::from(current_pubdata_price) <= tx_gas_per_pubdata_limit { - // The current pubdata price is small enough - l1_gas_price - } else { - // gasPerPubdata = ceil(17 * l1gasprice / fair_l2_gas_price) - // gasPerPubdata <= 17 * l1gasprice / fair_l2_gas_price + 1 - // fair_l2_gas_price(gasPerPubdata - 1) / 17 <= l1gasprice - let l1_gas_price = U256::from(fair_l2_gas_price) - * (tx_gas_per_pubdata_limit - U256::from(1u32)) - / U256::from(17); - - l1_gas_price.as_u64() - } -} - -/// This method assumes that (block with number `resolved_block_number` is present in DB) -/// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) -#[allow(clippy::too_many_arguments)] -#[tracing::instrument(skip( - connection, - tx, - operator_account, - block_timestamp_s, - base_system_contract, - storage_read_cache -))] -fn execute_tx_in_sandbox( - connection: StorageProcessor<'_>, - tx: Transaction, - execution_mode: TxExecutionMode, - operator_account: AccountTreeId, - block_id: api::BlockId, - resolved_block_number: zksync_types::MiniblockNumber, - block_timestamp_s: Option, - enforced_nonce: Option, - added_balance: U256, - job_type: BootloaderJobType, - l1_gas_price: u64, - fair_l2_gas_price: u64, - enforced_base_fee: Option, - base_system_contract: &BaseSystemContracts, - trace_call: bool, - storage_read_cache: &mut HashMap, -) -> ( - TransactionExecutionMetrics, - Result, -) { - let span = span!(Level::DEBUG, "execute_in_sandbox").entered(); - - let total_factory_deps = tx - .execute - .factory_deps - .as_ref() - .map_or(0, |deps| deps.len() as u16); - - let execution_result = apply_vm_in_sandbox( - connection, - tx, - execution_mode, - base_system_contract, - operator_account, - block_id, - resolved_block_number, - block_timestamp_s, - enforced_nonce, - added_balance, - l1_gas_price, - fair_l2_gas_price, - enforced_base_fee, - storage_read_cache, - |vm, tx| { - push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); - let result = if trace_call { - vm.execute_till_block_end_with_call_tracer(job_type) - } else { - vm.execute_till_block_end(job_type) - }; - - span.exit(); - result.full_result - }, - ); - - let tx_execution_metrics = collect_tx_execution_metrics(total_factory_deps, &execution_result); - - ( - tx_execution_metrics, - match execution_result.revert_reason { - None => Ok(execution_result), - Some(revert) => Err(revert.revert_reason.into()), - }, - ) -} - -#[allow(clippy::too_many_arguments)] -fn apply_vm_in_sandbox( - mut connection: StorageProcessor<'_>, - tx: Transaction, - execution_mode: TxExecutionMode, - base_system_contracts: &BaseSystemContracts, - operator_account: AccountTreeId, - block_id: api::BlockId, - resolved_block_number: zksync_types::MiniblockNumber, - block_timestamp_s: Option, - enforced_nonce: Option, - added_balance: U256, - l1_gas_price: u64, - fair_l2_gas_price: 
u64, - enforced_base_fee: Option, - storage_read_cache: &mut HashMap, - apply: impl FnOnce(&mut Box>, Transaction) -> T, -) -> T { - let stage_started_at = Instant::now(); - let span = span!(Level::DEBUG, "initialization").entered(); - - let (state_block_number, vm_block_number) = match block_id { - api::BlockId::Number(api::BlockNumber::Pending) => { - let sealed_l1_batch_number = connection - .blocks_web3_dal() - .get_sealed_l1_batch_number() - .unwrap(); - let sealed_miniblock_number = connection - .blocks_web3_dal() - .get_sealed_miniblock_number() - .unwrap(); - (sealed_miniblock_number, sealed_l1_batch_number + 1) - } - _ => { - let l1_batch_number = match connection - .blocks_web3_dal() - .get_l1_batch_number_of_miniblock(resolved_block_number) - .unwrap() - { - Some(l1_batch_number) => l1_batch_number, - None => { - connection - .blocks_web3_dal() - .get_sealed_l1_batch_number() - .unwrap() - + 1 - } - }; - (resolved_block_number, l1_batch_number) - } - }; - - let db_storage_provider = DbStorageProvider::new(connection, state_block_number, false); - - // Moving `storage_read_cache` to `storage_view`. It will be moved back once execution is finished and `storage_view` is not needed. - let mut storage_view = - StorageView::new_with_read_keys(db_storage_provider, std::mem::take(storage_read_cache)); - - let block_timestamp_ms = match block_id { - api::BlockId::Number(api::BlockNumber::Pending) => millis_since_epoch(), - _ => { - let block_timestamp_s = block_timestamp_s.unwrap_or_else(|| { - panic!( - "Block timestamp is `None`, `block_id`: {:?}, `resolved_block_number`: {}", - block_id, resolved_block_number.0 - ) - }); - (block_timestamp_s as u128) * 1000 - } - }; - - if let Some(nonce) = enforced_nonce { - let nonce_key = get_nonce_key(&tx.initiator_account()); - let full_nonce = storage_view.get_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - - let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - - storage_view.set_value(&nonce_key, u256_to_h256(enforced_full_nonce)); - } - - { - let payer = tx.payer(); - let balance_key = storage_key_for_eth_balance(&payer); - - let current_balance = h256_to_u256(storage_view.get_value(&balance_key)); - storage_view.set_value(&balance_key, u256_to_h256(current_balance + added_balance)); - } - - let mut oracle_tools = - vm::OracleTools::new(&mut storage_view as &mut dyn Storage, HistoryDisabled); - let block_properties = BlockProperties { - default_aa_code_hash: h256_to_u256(base_system_contracts.default_aa.hash), - zkporter_is_available: ZKPORTER_IS_AVAILABLE, - }; - - let block_context = DerivedBlockContext { - context: BlockContext { - block_number: vm_block_number.0, - block_timestamp: (block_timestamp_ms / 1000) as u64, - l1_gas_price, - fair_l2_gas_price, - operator_address: *operator_account.address(), - }, - base_fee: enforced_base_fee.unwrap_or_else(|| { - derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price).0 - }), - }; - - // Since this method assumes that the block vm_block_number-1 is present in the DB, it means that its hash - // has already been stored in the VM. 
- let block_context_properties = BlockContextMode::OverrideCurrent(block_context); - - let mut vm = init_vm( - &mut oracle_tools, - block_context_properties, - &block_properties, - execution_mode, - base_system_contracts, - ); - - metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "initialization"); - span.exit(); - - let tx_id = format!( - "{:?}-{} ", - tx.initiator_account(), - tx.nonce().unwrap_or(Nonce(0)) - ); - - let stage_started_at = Instant::now(); - let result = apply(&mut vm, tx); - let vm_execution_took = stage_started_at.elapsed(); - metrics::histogram!("api.web3.sandbox", vm_execution_took, "stage" => "execution"); - - let oracles_sizes = record_vm_memory_metrics(vm); - let storage_view_cache = storage_view.get_cache_size(); - metrics::histogram!( - "runtime_context.memory.storage_view_cache_size", - storage_view_cache as f64 - ); - metrics::histogram!( - "runtime_context.memory", - (oracles_sizes + storage_view_cache) as f64 - ); - - let total_storage_invocations = - storage_view.get_value_storage_invocations + storage_view.set_value_storage_invocations; - let total_time_spent_in_storage = - storage_view.time_spent_on_get_value + storage_view.time_spent_on_set_value; - - metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.storage_invocations_missed as f64, "interaction" => "missed"); - metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.get_value_storage_invocations as f64, "interaction" => "get_value"); - metrics::histogram!("runtime_context.storage_interaction.amount", storage_view.set_value_storage_invocations as f64, "interaction" => "set_value"); - metrics::histogram!("runtime_context.storage_interaction.amount", (total_storage_invocations) as f64, "interaction" => "total"); - - metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_storage_missed, "interaction" => "missed"); - metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_get_value, "interaction" => "get_value"); - metrics::histogram!("runtime_context.storage_interaction.duration", storage_view.time_spent_on_set_value, "interaction" => "set_value"); - metrics::histogram!("runtime_context.storage_interaction.duration", total_time_spent_in_storage, "interaction" => "total"); - - if total_storage_invocations > 0 { - metrics::histogram!( - "runtime_context.storage_interaction.duration_per_unit", - total_time_spent_in_storage.div_f64(total_storage_invocations as f64), - "interaction" => "total" - ); - } - if storage_view.storage_invocations_missed > 0 { - metrics::histogram!( - "runtime_context.storage_interaction.duration_per_unit", - storage_view.time_spent_on_storage_missed.div_f64(storage_view.storage_invocations_missed as f64), - "interaction" => "missed" - ); - } - - metrics::histogram!( - "runtime_context.storage_interaction.ratio", - total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64(), - ); - - const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1000; - - if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - vlog::info!( - "Tx {} resulted in {} storage_invocations, {} new_storage_invocations, {} get_value_storage_invocations, {} set_value_storage_invocations, vm execution tool {:?}, storage interaction took {:?} (missed: {:?} get: {:?} set: {:?})", - tx_id, - total_storage_invocations, - storage_view.storage_invocations_missed, - storage_view.get_value_storage_invocations, - 
storage_view.set_value_storage_invocations, - vm_execution_took, - total_time_spent_in_storage, - storage_view.time_spent_on_storage_missed, - storage_view.time_spent_on_get_value, - storage_view.time_spent_on_set_value, - ); - } - - // Move `read_storage_keys` from `storage_view` back to cache. - *storage_read_cache = storage_view.take_read_storage_keys(); - - result -} - -// Some slots can be marked as "trusted". That is needed for slots which can not be -// trusted to change between validation and execution in general case, but -// sometimes we can safely rely on them to not change often. -fn get_validation_params( - connection: &mut StorageProcessor<'_>, - tx: &L2Tx, - computational_gas_limit: u32, -) -> ValidationTracerParams { - let user_address = tx.common_data.initiator_address; - let paymaster_address = tx.common_data.paymaster_params.paymaster; - - // This method assumes that the number of "well-known" tokens is relatively low. When it grows - // we may need to introduce some kind of caching. - let well_known_tokens: Vec<_> = connection - .tokens_dal() - .get_well_known_token_addresses() - .into_iter() - .map(|token| token.1) - .collect(); - - let trusted_slots: HashSet<_> = well_known_tokens - .clone() - .into_iter() - .flat_map(|token| { - TRUSTED_TOKEN_SLOTS - .clone() - .into_iter() - .map(move |slot| (token, slot)) - }) - .collect(); - - // We currently don't support any specific trusted addresses. - let trusted_addresses = HashSet::new(); - - // The slots the value of which will be added as allowed address on the fly. - // Required for working with transparent proxies. - let trusted_address_slots: HashSet<_> = well_known_tokens - .into_iter() - .flat_map(|token| { - TRUSTED_ADDRESS_SLOTS - .clone() - .into_iter() - .map(move |slot| (token, slot)) - }) - .collect(); - - ValidationTracerParams { - user_address, - paymaster_address, - trusted_slots, - trusted_addresses, - trusted_address_slots, - computational_gas_limit, - } -} - -#[allow(clippy::too_many_arguments)] -fn validate_tx_in_sandbox( - mut connection: StorageProcessor<'_>, - tx: L2Tx, - execution_mode: TxExecutionMode, - operator_account: AccountTreeId, - base_system_contracts: &BaseSystemContracts, - block_id: api::BlockId, - resolved_block_number: zksync_types::MiniblockNumber, - block_timestamp_s: Option, - enforced_nonce: Option, - added_balance: U256, - l1_gas_price: u64, - fair_l2_gas_price: u64, - enforced_base_fee: Option, - computational_gas_limit: u32, -) -> Result<(), ValidationError> { - let stage_started_at = Instant::now(); - let span = span!(Level::DEBUG, "validate_in_sandbox").entered(); - let validation_params = get_validation_params(&mut connection, &tx, computational_gas_limit); - - let tx: Transaction = tx.into(); - - let validation_result = apply_vm_in_sandbox( - connection, - tx, - execution_mode, - base_system_contracts, - operator_account, - block_id, - resolved_block_number, - block_timestamp_s, - enforced_nonce, - added_balance, - l1_gas_price, - fair_l2_gas_price, - enforced_base_fee, - &mut Default::default(), - |vm, tx| { - let stage_started_at = Instant::now(); - let span = span!(Level::DEBUG, "validation").entered(); - - push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None); - let result = vm.execute_validation(validation_params); - - metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "validation"); - span.exit(); - - result - }, - ); - - metrics::histogram!("server.api.validation_sandbox", stage_started_at.elapsed(), "stage" => 
"validate_in_sandbox"); - span.exit(); - - validation_result -} - -fn collect_tx_execution_metrics( - contracts_deployed: u16, - result: &VmExecutionResult, -) -> TransactionExecutionMetrics { - let event_topics = result - .events - .iter() - .map(|event| event.indexed_topics.len() as u16) - .sum(); - - let l2_l1_long_messages = extract_long_l2_to_l1_messages(&result.events) - .iter() - .map(|event| event.len()) - .sum(); - - let published_bytecode_bytes = extract_published_bytecodes(&result.events) - .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash)) - .sum(); - - let writes_metrics = - StorageWritesDeduplicator::apply_on_empty_state(&result.storage_log_queries); - - TransactionExecutionMetrics { - initial_storage_writes: writes_metrics.initial_storage_writes, - repeated_storage_writes: writes_metrics.repeated_storage_writes, - gas_used: result.gas_used as usize, - event_topics, - l2_l1_long_messages, - published_bytecode_bytes, - contracts_used: result.contracts_used, - contracts_deployed, - l2_l1_logs: result.l2_to_l1_logs.len(), - vm_events: result.events.len(), - storage_logs: result.storage_log_queries.len(), - total_log_queries: result.total_log_queries, - cycles_used: result.cycles_used, - computational_gas_used: result.computational_gas_used, - } -} - -impl From for SandboxExecutionError { - fn from(reason: TxRevertReason) -> Self { - match reason { - TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert( - reason.to_user_friendly_string(), - reason.encoded_data(), - ), - TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert( - reason.to_user_friendly_string(), - reason.encoded_data(), - ), - TxRevertReason::FailedToChargeFee(reason) => { - SandboxExecutionError::FailedToChargeFee(reason.to_string()) - } - TxRevertReason::FromIsNotAnAccount => SandboxExecutionError::FromIsNotAnAccount, - TxRevertReason::InnerTxError => SandboxExecutionError::InnerTxError, - TxRevertReason::Unknown(reason) => { - SandboxExecutionError::BootloaderFailure(reason.to_string()) - } - TxRevertReason::ValidationFailed(reason) => { - SandboxExecutionError::AccountValidationFailed(reason.to_string()) - } - TxRevertReason::PaymasterValidationFailed(reason) => { - SandboxExecutionError::PaymasterValidationFailed(reason.to_string()) - } - TxRevertReason::PrePaymasterPreparationFailed(reason) => { - SandboxExecutionError::PrePaymasterPreparationFailed(reason.to_string()) - } - TxRevertReason::UnexpectedVMBehavior(reason) => { - SandboxExecutionError::UnexpectedVMBehavior(reason) - } - TxRevertReason::BootloaderOutOfGas => { - SandboxExecutionError::UnexpectedVMBehavior("bootloader is out of gas".to_string()) - } - TxRevertReason::NotEnoughGasProvided => SandboxExecutionError::UnexpectedVMBehavior( - "The bootloader did not contain enough gas to execute the transaction".to_string(), - ), - revert_reason @ TxRevertReason::FailedToMarkFactoryDependencies(_) => { - SandboxExecutionError::Revert(revert_reason.to_string(), vec![]) - } - TxRevertReason::PayForTxFailed(reason) => { - SandboxExecutionError::FailedToPayForTransaction(reason.to_string()) - } - TxRevertReason::TooBigGasLimit => { - SandboxExecutionError::Revert(TxRevertReason::TooBigGasLimit.to_string(), vec![]) - } - TxRevertReason::MissingInvocationLimitReached => SandboxExecutionError::InnerTxError, - } - } -} - -/// Returns the sum of all oracles' sizes. 
-fn record_vm_memory_metrics<H: HistoryMode>(vm: Box<VmInstance<'_, H>>) -> usize { - let event_sink_inner = vm.state.event_sink.get_size(); - let event_sink_history = vm.state.event_sink.get_history_size(); - let memory_inner = vm.state.memory.get_size(); - let memory_history = vm.state.memory.get_history_size(); - let decommittment_processor_inner = vm.state.decommittment_processor.get_size(); - let decommittment_processor_history = vm.state.decommittment_processor.get_history_size(); - let storage_inner = vm.state.storage.get_size(); - let storage_history = vm.state.storage.get_history_size(); - - metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.memory_size", memory_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.memory_size", memory_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_history as f64, "type" => "history"); - metrics::histogram!("runtime_context.memory.storage_size", storage_inner as f64, "type" => "inner"); - metrics::histogram!("runtime_context.memory.storage_size", storage_history as f64, "type" => "history"); - - [ - event_sink_inner, - event_sink_history, - memory_inner, - memory_history, - decommittment_processor_inner, - decommittment_processor_history, - storage_inner, - storage_history, - ] - .iter() - .sum::<usize>() -} diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs new file mode 100644 index 000000000000..458b7833d5f1 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -0,0 +1,204 @@ +//! This module provides primitives focusing on the VM instantiation and execution for different use cases. +//! It is rather generic and low-level, so it's not supposed to be a part of public API. +//! +//! Instead, we expect people to write wrappers in the `execution_sandbox` module with a more high-level API +//! that would, in its turn, be used by the actual API method handlers. +//! +//! This module is intended to be blocking.
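The module doc above captures the one structural trick in the new `apply.rs`: the sandbox body runs on a blocking thread, while the storage layer it talks to is async, so the code threads a Tokio runtime `Handle` into the blocking context and drives individual futures to completion with `block_on`. A minimal, self-contained sketch of that pattern follows; `load_value` and `blocking_vm_work` are illustrative stand-ins, not items from this patch:

use tokio::runtime::Handle;

// Hypothetical async storage lookup, standing in for the DAL calls that
// `apply_vm_in_sandbox` below drives via `rt_handle.block_on(...)`.
async fn load_value(key: u64) -> u64 {
    key * 2
}

// Blocking code (think: the VM sandbox) drives async work through the handle.
// This is only sound on a blocking thread, never on a runtime worker thread.
fn blocking_vm_work(rt_handle: Handle) -> u64 {
    rt_handle.block_on(load_value(21))
}

#[tokio::main]
async fn main() {
    let handle = Handle::current();
    // Move the blocking computation off the async worker threads.
    let result = tokio::task::spawn_blocking(move || blocking_vm_work(handle))
        .await
        .unwrap();
    assert_eq!(result, 42);
}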
+ +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; + +use vm::{ + vm_with_bootloader::{ + derive_base_fee_and_gas_per_pubdata, init_vm, BlockContext, BlockContextMode, + DerivedBlockContext, + }, + zk_evm::block_properties::BlockProperties, + HistoryDisabled, VmInstance, +}; +use zksync_config::constants::ZKPORTER_IS_AVAILABLE; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_state::{PostgresStorage, ReadStorage, StorageView, WriteStorage}; +use zksync_types::{ + api, get_nonce_key, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + L1BatchNumber, MiniblockNumber, Nonce, StorageKey, Transaction, H256, U256, +}; +use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; + +use super::{vm_metrics, BlockArgs, TxExecutionArgs, TxSharedArgs}; + +#[allow(clippy::too_many_arguments)] +pub(super) fn apply_vm_in_sandbox<T>( + rt_handle: tokio::runtime::Handle, + shared_args: &TxSharedArgs, + execution_args: &TxExecutionArgs, + connection_pool: &ConnectionPool, + tx: Transaction, + block_args: BlockArgs, + storage_read_cache: HashMap<StorageKey, H256>, + apply: impl FnOnce(&mut Box<VmInstance<'_, HistoryDisabled>>, Transaction) -> T, +) -> (T, HashMap<StorageKey, H256>) { + let stage_started_at = Instant::now(); + let span = tracing::debug_span!("initialization").entered(); + + let mut connection = rt_handle.block_on(connection_pool.access_storage_tagged("api")); + let connection_acquire_time = stage_started_at.elapsed(); + // We don't want to emit too many logs. + if connection_acquire_time > Duration::from_millis(10) { + vlog::debug!( + "Obtained connection (took {:?})", + stage_started_at.elapsed() + ); + } + + let resolve_started_at = Instant::now(); + let (state_block_number, vm_block_number) = + rt_handle.block_on(block_args.resolve_block_numbers(&mut connection)); + let resolve_time = resolve_started_at.elapsed(); + // We don't want to emit too many logs. + if resolve_time > Duration::from_millis(10) { + vlog::debug!( + "Resolved block numbers (took {:?})", + resolve_started_at.elapsed() + ); + } + + let block_timestamp = block_args.block_timestamp_seconds(); + + let storage = PostgresStorage::new(rt_handle, connection, state_block_number, false) + .with_factory_deps_cache(shared_args.factory_deps_cache.clone()); + // Moving `storage_read_cache` to `storage_view`. It will be moved back once execution is finished and `storage_view` is not needed. + let mut storage_view = StorageView::new_with_read_keys(storage, storage_read_cache); + + let storage_view_setup_started_at = Instant::now(); + if let Some(nonce) = execution_args.enforced_nonce { + let nonce_key = get_nonce_key(&tx.initiator_account()); + let full_nonce = storage_view.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); + storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + } + + let payer = tx.payer(); + let balance_key = storage_key_for_eth_balance(&payer); + let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + current_balance += execution_args.added_balance; + storage_view.set_value(balance_key, u256_to_h256(current_balance)); + let storage_view_setup_time = storage_view_setup_started_at.elapsed(); + // We don't want to emit too many logs.
+ if storage_view_setup_time > Duration::from_millis(10) { + vlog::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); + } + + let mut oracle_tools = vm::OracleTools::new(&mut storage_view, HistoryDisabled); + let block_properties = BlockProperties { + default_aa_code_hash: h256_to_u256(shared_args.base_system_contracts.default_aa.hash), + zkporter_is_available: ZKPORTER_IS_AVAILABLE, + }; + let &TxSharedArgs { + l1_gas_price, + fair_l2_gas_price, + .. + } = shared_args; + + let block_context = DerivedBlockContext { + context: BlockContext { + block_number: vm_block_number.0, + block_timestamp, + l1_gas_price, + fair_l2_gas_price, + operator_address: *shared_args.operator_account.address(), + }, + base_fee: execution_args.enforced_base_fee.unwrap_or_else(|| { + derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price).0 + }), + }; + + // Since this method assumes that the block vm_block_number-1 is present in the DB, it means that its hash + // has already been stored in the VM. + let block_context_properties = BlockContextMode::OverrideCurrent(block_context); + + let mut vm = init_vm( + &mut oracle_tools, + block_context_properties, + &block_properties, + execution_args.execution_mode, + &shared_args.base_system_contracts, + ); + + metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "initialization"); + span.exit(); + + let tx_id = format!( + "{:?}-{}", + tx.initiator_account(), + tx.nonce().unwrap_or(Nonce(0)) + ); + let stage_started_at = Instant::now(); + let result = apply(&mut vm, tx); + let vm_execution_took = stage_started_at.elapsed(); + metrics::histogram!("api.web3.sandbox", vm_execution_took, "stage" => "execution"); + + let oracles_sizes = vm_metrics::record_vm_memory_metrics(&vm); + vm_metrics::report_storage_view_metrics( + &tx_id, + oracles_sizes, + vm_execution_took, + storage_view.metrics(), + ); + + // Move `read_storage_keys` from `storage_view` back to cache. 
+ (result, storage_view.into_read_storage_keys()) } + +impl BlockArgs { + fn is_pending_miniblock(&self) -> bool { + matches!( + self.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ) + } + + async fn resolve_block_numbers( + &self, + connection: &mut StorageProcessor<'_>, + ) -> (MiniblockNumber, L1BatchNumber) { + if self.is_pending_miniblock() { + let sealed_l1_batch_number = connection + .blocks_web3_dal() + .get_sealed_l1_batch_number() + .await + .unwrap(); + let sealed_miniblock_number = connection + .blocks_web3_dal() + .get_sealed_miniblock_number() + .await + .unwrap(); + (sealed_miniblock_number, sealed_l1_batch_number + 1) + } else { + let l1_batch_number = connection + .storage_web3_dal() + .get_provisional_l1_batch_number_of_miniblock_unchecked(self.resolved_block_number) + .await + .unwrap(); + (self.resolved_block_number, l1_batch_number) + } + } + + fn block_timestamp_seconds(&self) -> u64 { + if self.is_pending_miniblock() { + seconds_since_epoch() + } else { + self.block_timestamp_s.unwrap_or_else(|| { + panic!( + "Block timestamp is `None`, `block_id`: {:?}, `resolved_block_number`: {}", + self.block_id, self.resolved_block_number.0 + ); + }) + } + } +} diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs new file mode 100644 index 000000000000..4b67d987ebcf --- /dev/null +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/error.rs @@ -0,0 +1,83 @@ +use thiserror::Error; + +use vm::TxRevertReason; + +#[derive(Debug, Error)] +pub(crate) enum SandboxExecutionError { + #[error("Account validation failed: {0}")] + AccountValidationFailed(String), + #[error("Failed to charge fee: {0}")] + FailedToChargeFee(String), + #[error("Paymaster validation failed: {0}")] + PaymasterValidationFailed(String), + #[error("Pre-paymaster preparation failed: {0}")] + PrePaymasterPreparationFailed(String), + #[error("From is not an account")] + FromIsNotAnAccount, + #[error("Bootloader failure: {0}")] + BootloaderFailure(String), + #[error("Revert: {0}")] + Revert(String, Vec<u8>), + #[error("Failed to pay for the transaction: {0}")] + FailedToPayForTransaction(String), + #[error("Bootloader-based tx failed")] + InnerTxError, + #[error( + "Virtual machine entered unexpected state. Please contact developers and provide transaction details \ + that caused this error. Error description: {0}" + )] + UnexpectedVMBehavior(String), + #[error("Transaction is unexecutable.
Reason: {0}")] + Unexecutable(String), +} + +impl From for SandboxExecutionError { + fn from(reason: TxRevertReason) -> Self { + match reason { + TxRevertReason::EthCall(reason) => SandboxExecutionError::Revert( + reason.to_user_friendly_string(), + reason.encoded_data(), + ), + TxRevertReason::TxReverted(reason) => SandboxExecutionError::Revert( + reason.to_user_friendly_string(), + reason.encoded_data(), + ), + TxRevertReason::FailedToChargeFee(reason) => { + SandboxExecutionError::FailedToChargeFee(reason.to_string()) + } + TxRevertReason::FromIsNotAnAccount => SandboxExecutionError::FromIsNotAnAccount, + TxRevertReason::InnerTxError => SandboxExecutionError::InnerTxError, + TxRevertReason::Unknown(reason) => { + SandboxExecutionError::BootloaderFailure(reason.to_string()) + } + TxRevertReason::ValidationFailed(reason) => { + SandboxExecutionError::AccountValidationFailed(reason.to_string()) + } + TxRevertReason::PaymasterValidationFailed(reason) => { + SandboxExecutionError::PaymasterValidationFailed(reason.to_string()) + } + TxRevertReason::PrePaymasterPreparationFailed(reason) => { + SandboxExecutionError::PrePaymasterPreparationFailed(reason.to_string()) + } + TxRevertReason::UnexpectedVMBehavior(reason) => { + SandboxExecutionError::UnexpectedVMBehavior(reason) + } + TxRevertReason::BootloaderOutOfGas => { + SandboxExecutionError::UnexpectedVMBehavior("bootloader is out of gas".to_string()) + } + TxRevertReason::NotEnoughGasProvided => SandboxExecutionError::UnexpectedVMBehavior( + "The bootloader did not contain enough gas to execute the transaction".to_string(), + ), + revert_reason @ TxRevertReason::FailedToMarkFactoryDependencies(_) => { + SandboxExecutionError::Revert(revert_reason.to_string(), vec![]) + } + TxRevertReason::PayForTxFailed(reason) => { + SandboxExecutionError::FailedToPayForTransaction(reason.to_string()) + } + TxRevertReason::TooBigGasLimit => { + SandboxExecutionError::Revert(TxRevertReason::TooBigGasLimit.to_string(), vec![]) + } + TxRevertReason::MissingInvocationLimitReached => SandboxExecutionError::InnerTxError, + } + } +} diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs new file mode 100644 index 000000000000..51d5807e6fb2 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -0,0 +1,208 @@ +//! Implementation of "executing" methods, e.g. `eth_call`. 
+
+use tracing::{span, Level};
+
+use std::{collections::HashMap, mem};
+
+use vm::{
+    utils::ETH_CALL_GAS_LIMIT,
+    vm_with_bootloader::{
+        push_transaction_to_bootloader_memory, BootloaderJobType, TxExecutionMode,
+    },
+    VmExecutionResult,
+};
+use zksync_dal::ConnectionPool;
+use zksync_types::{
+    fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, StorageKey,
+    Transaction, H256, U256,
+};
+
+use super::{apply, error::SandboxExecutionError, vm_metrics, BlockArgs, TxSharedArgs, VmPermit};
+
+#[derive(Debug)]
+pub(crate) struct TxExecutionArgs {
+    pub execution_mode: TxExecutionMode,
+    pub enforced_nonce: Option<Nonce>,
+    pub added_balance: U256,
+    pub enforced_base_fee: Option<u64>,
+}
+
+impl TxExecutionArgs {
+    pub fn for_validation(tx: &L2Tx) -> Self {
+        Self {
+            execution_mode: TxExecutionMode::VerifyExecute,
+            enforced_nonce: Some(tx.nonce()),
+            added_balance: U256::zero(),
+            enforced_base_fee: Some(tx.common_data.fee.max_fee_per_gas.as_u64()),
+        }
+    }
+
+    fn for_eth_call(
+        enforced_base_fee: u64,
+        vm_execution_cache_misses_limit: Option<usize>,
+    ) -> Self {
+        let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX);
+        Self {
+            execution_mode: TxExecutionMode::EthCall {
+                missed_storage_invocation_limit,
+            },
+            enforced_nonce: None,
+            added_balance: U256::zero(),
+            enforced_base_fee: Some(enforced_base_fee),
+        }
+    }
+
+    pub fn for_gas_estimate(
+        vm_execution_cache_misses_limit: Option<usize>,
+        tx: &Transaction,
+        base_fee: u64,
+    ) -> Self {
+        let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX);
+        // For L2 transactions we need to explicitly put enough balance into the user's account,
+        // while for L1->L2 transactions the `to_mint` field plays this role.
+        let added_balance = match &tx.common_data {
+            ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
+            ExecuteTransactionCommon::L1(_) => U256::zero(),
+        };
+
+        Self {
+            execution_mode: TxExecutionMode::EstimateFee {
+                missed_storage_invocation_limit,
+            },
+            enforced_nonce: tx.nonce(),
+            added_balance,
+            enforced_base_fee: Some(base_fee),
+        }
+    }
+}
+
+#[allow(clippy::too_many_arguments)]
+pub(crate) async fn execute_tx_eth_call(
+    vm_permit: &VmPermit<'_>, // Proof that permit was acquired.
+    shared_args: TxSharedArgs,
+    connection_pool: ConnectionPool,
+    mut tx: L2Tx,
+    block_args: BlockArgs,
+    vm_execution_cache_misses_limit: Option<usize>,
+    trace_call: bool,
+) -> Result<VmExecutionResult, SandboxExecutionError> {
+    let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64();
+    let execution_args =
+        TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit);
+
+    // Protection against infinite-loop `eth_call`s and the like:
+    // limiting the amount of gas the call can use.
+    // We can't use BLOCK_ERGS_LIMIT here since the VM itself has some overhead.
+    tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into();
+    let (vm_result, _) = execute_tx_in_sandbox(
+        vm_permit,
+        shared_args,
+        execution_args,
+        connection_pool,
+        tx.into(),
+        block_args,
+        BootloaderJobType::TransactionExecution,
+        trace_call,
+        &mut HashMap::new(),
+    )
+    .await;
+
+    vm_result
+}
+
+#[tracing::instrument(skip_all)]
+pub(crate) async fn execute_tx_with_pending_state(
+    vm_permit: &VmPermit<'_>, // Proof that permit was acquired.
+    mut shared_args: TxSharedArgs,
+    execution_args: TxExecutionArgs,
+    connection_pool: ConnectionPool,
+    tx: Transaction,
+    storage_read_cache: &mut HashMap<StorageKey, H256>,
+) -> (
+    Result<VmExecutionResult, SandboxExecutionError>,
+    TransactionExecutionMetrics,
+) {
+    let mut connection = connection_pool.access_storage_tagged("api").await;
+    let block_args = BlockArgs::pending(&mut connection).await;
+    drop(connection);
+    // In order for execution to pass smoothly, we need to ensure that the block's required
+    // gasPerPubdata is <= the one in the transaction itself.
+    shared_args.adjust_l1_gas_price(tx.gas_per_pubdata_byte_limit());
+
+    execute_tx_in_sandbox(
+        vm_permit,
+        shared_args,
+        execution_args,
+        connection_pool,
+        tx,
+        block_args,
+        BootloaderJobType::TransactionExecution,
+        false,
+        storage_read_cache,
+    )
+    .await
+}
+
+/// This method assumes that either the block with number `resolved_block_number` is present
+/// in the DB, or `block_id` is `pending` and the block with number `resolved_block_number - 1`
+/// is present in the DB.
+#[allow(clippy::too_many_arguments)]
+#[tracing::instrument(skip_all)]
+async fn execute_tx_in_sandbox(
+    vm_permit: &VmPermit<'_>,
+    shared_args: TxSharedArgs,
+    execution_args: TxExecutionArgs,
+    connection_pool: ConnectionPool,
+    tx: Transaction,
+    block_args: BlockArgs,
+    job_type: BootloaderJobType,
+    trace_call: bool,
+    storage_read_cache: &mut HashMap<StorageKey, H256>,
+) -> (
+    Result<VmExecutionResult, SandboxExecutionError>,
+    TransactionExecutionMetrics,
+) {
+    let total_factory_deps = tx
+        .execute
+        .factory_deps
+        .as_ref()
+        .map_or(0, |deps| deps.len() as u16);
+
+    let rt_handle = vm_permit.rt_handle();
+    let moved_cache = mem::take(storage_read_cache);
+    let (execution_result, moved_cache) = tokio::task::spawn_blocking(move || {
+        let span = span!(Level::DEBUG, "execute_in_sandbox").entered();
+        let execution_mode = execution_args.execution_mode;
+        let result = apply::apply_vm_in_sandbox(
+            rt_handle,
+            &shared_args,
+            &execution_args,
+            &connection_pool,
+            tx,
+            block_args,
+            moved_cache,
+            |vm, tx| {
+                push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None);
+                let result = if trace_call {
+                    vm.execute_till_block_end_with_call_tracer(job_type)
+                } else {
+                    vm.execute_till_block_end(job_type)
+                };
+                result.full_result
+            },
+        );
+        span.exit();
+        result
+    })
+    .await
+    .unwrap();
+
+    *storage_read_cache = moved_cache;
+
+    let tx_execution_metrics =
+        vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result);
+    let result = match execution_result.revert_reason {
+        None => Ok(execution_result),
+        Some(revert) => Err(revert.revert_reason.into()),
+    };
+    (result, tx_execution_metrics)
+}
diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs
new file mode 100644
index 000000000000..38046a401bc8
--- /dev/null
+++ b/core/bin/zksync_core/src/api_server/execution_sandbox/mod.rs
@@ -0,0 +1,253 @@
+use std::time::{Duration, Instant};
+
+use tokio::runtime::{Handle, Runtime};
+use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata;
+use zksync_config::constants::PUBLISH_BYTECODE_OVERHEAD;
+use zksync_contracts::BaseSystemContracts;
+use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor};
+use zksync_state::{FactoryDepsCache, PostgresStorage, ReadStorage, StorageView};
+use zksync_types::{api, AccountTreeId, MiniblockNumber, U256};
+use zksync_utils::bytecode::{compress_bytecode, hash_bytecode};
+
+// Note: keep the modules private, and instead re-export functions that make up the public interface.
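+// Submodule overview:
+// - `apply` sets up a VM over a Postgres-backed `StorageView` and runs a closure on it;
+// - `error` defines `SandboxExecutionError` and its conversion from `TxRevertReason`;
+// - `execute` implements the "executing" methods (`eth_call` and dry runs);
+// - `validate` runs account validation against the pending state;
+// - `vm_metrics` reports statistics and memory usage of VM runs.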
+mod apply;
+mod error;
+mod execute;
+mod validate;
+mod vm_metrics;
+
+pub(super) use self::{
+    error::SandboxExecutionError,
+    execute::{execute_tx_eth_call, execute_tx_with_pending_state, TxExecutionArgs},
+};
+
+/// Permit to invoke VM code.
+/// Any publicly-facing method that invokes the VM is expected to accept a reference to this
+/// structure, as proof that the caller obtained a token from `VmConcurrencyLimiter`.
+#[derive(Debug)]
+pub struct VmPermit<'a> {
+    _permit: tokio::sync::SemaphorePermit<'a>,
+    /// A handle to the runtime that is used to query the VM storage.
+    rt_handle: Handle,
+}
+
+impl<'a> VmPermit<'a> {
+    fn rt_handle(&self) -> Handle {
+        self.rt_handle.clone()
+    }
+}
+
+/// Synchronization primitive that limits the number of concurrent VM executions.
+/// This is required to prevent the server from being overloaded with VM calls.
+///
+/// This structure is expected to be used in every method that executes VM code, at the topmost
+/// level (i.e., before any async calls are made or the VM is instantiated).
+///
+/// Note that the actual limit on the number of VMs is the minimum of the limit in this structure
+/// *and* the size of the blocking tokio threadpool. Even if the limit is set to 1024 but tokio
+/// is configured to have no more than 512 blocking threads, the actual limit will be 512.
+#[derive(Debug)]
+pub struct VmConcurrencyLimiter {
+    /// Semaphore that limits the number of concurrent VM executions.
+    limiter: tokio::sync::Semaphore,
+    /// A dedicated runtime used to query the VM storage in the API.
+    vm_runtime: RuntimeAccess,
+}
+
+/// Either a dedicated runtime, or a handle to an externally created runtime.
+#[derive(Debug)]
+enum RuntimeAccess {
+    Owned(Runtime),
+    Handle(Handle),
+}
+
+impl RuntimeAccess {
+    fn handle(&self) -> Handle {
+        match self {
+            RuntimeAccess::Owned(rt) => rt.handle().clone(),
+            RuntimeAccess::Handle(handle) => handle.clone(),
+        }
+    }
+}
+
+impl VmConcurrencyLimiter {
+    pub fn new(max_concurrency: Option<usize>) -> Self {
+        if let Some(max_concurrency) = max_concurrency {
+            vlog::info!("Initializing the VM concurrency limiter with a separate runtime. Max concurrency: {:?}", max_concurrency);
+            let vm_runtime = tokio::runtime::Builder::new_multi_thread()
+                .enable_all()
+                .build()
+                .expect("Failed to initialize VM runtime");
+            Self {
+                limiter: tokio::sync::Semaphore::new(max_concurrency),
+                vm_runtime: RuntimeAccess::Owned(vm_runtime),
+            }
+        } else {
+            // The default concurrency is chosen to be beyond the number of connections in the pool /
+            // the number of blocking threads in the tokio threadpool.
+            // The real "concurrency limiter" will be the lesser of these values.
+            const DEFAULT_CONCURRENCY_LIMIT: usize = 2048;
+            vlog::info!("Initializing the VM concurrency limiter with the default runtime");
+            Self {
+                limiter: tokio::sync::Semaphore::new(DEFAULT_CONCURRENCY_LIMIT),
+                vm_runtime: RuntimeAccess::Handle(tokio::runtime::Handle::current()),
+            }
+        }
+    }
+
+    /// Waits until there is a free slot in the concurrency limiter.
+    /// Returns a permit that should be dropped when the VM execution is finished.
+    pub async fn acquire(&self) -> VmPermit<'_> {
+        let available_permits = self.limiter.available_permits();
+        metrics::histogram!(
+            "api.web3.sandbox.semaphore.permits",
+            available_permits as f64
+        );
+
+        let start = Instant::now();
+        let permit = self
+            .limiter
+            .acquire()
+            .await
+            .expect("Semaphore is never closed");
+        let elapsed = start.elapsed();
+        // We don't want to emit too many logs.
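+        // Only surface waits that are long enough to hint at contention; sub-10ms
+        // acquisitions are the norm and would flood the logs.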
+        if elapsed > Duration::from_millis(10) {
+            vlog::debug!(
+                "Permit is obtained. Available permits: {available_permits}. Took {elapsed:?}"
+            );
+        }
+        metrics::histogram!("api.web3.sandbox", elapsed, "stage" => "vm_concurrency_limiter_acquire");
+        VmPermit {
+            _permit: permit,
+            rt_handle: self.vm_runtime.handle(),
+        }
+    }
+}
+
+pub(super) fn adjust_l1_gas_price_for_tx(
+    l1_gas_price: u64,
+    fair_l2_gas_price: u64,
+    tx_gas_per_pubdata_limit: U256,
+) -> u64 {
+    let (_, current_pubdata_price) =
+        derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price);
+    if U256::from(current_pubdata_price) <= tx_gas_per_pubdata_limit {
+        // The current pubdata price is small enough.
+        l1_gas_price
+    } else {
+        // gasPerPubdata = ceil(17 * l1_gas_price / fair_l2_gas_price)
+        // gasPerPubdata <= 17 * l1_gas_price / fair_l2_gas_price + 1
+        // fair_l2_gas_price * (gasPerPubdata - 1) / 17 <= l1_gas_price
+        let l1_gas_price = U256::from(fair_l2_gas_price)
+            * (tx_gas_per_pubdata_limit - U256::from(1u32))
+            / U256::from(17);
+
+        l1_gas_price.as_u64()
+    }
+}
+
+async fn get_pending_state(
+    connection: &mut StorageProcessor<'_>,
+) -> (api::BlockId, MiniblockNumber) {
+    let block_id = api::BlockId::Number(api::BlockNumber::Pending);
+    let resolved_block_number = connection
+        .blocks_web3_dal()
+        .resolve_block_id(block_id)
+        .await
+        .unwrap()
+        .expect("Pending block should be present");
+    (block_id, resolved_block_number)
+}
+
+/// Returns the amount of pubdata that the transaction will spend on publishing factory deps.
+pub(super) async fn get_pubdata_for_factory_deps(
+    connection_pool: &ConnectionPool,
+    factory_deps: &[Vec<u8>],
+    factory_deps_cache: FactoryDepsCache,
+) -> u32 {
+    if factory_deps.is_empty() {
+        return 0; // Shortcut for the common case, allowing us to not acquire DB connections etc.
+    }
+
+    let mut connection = connection_pool.access_storage_tagged("api").await;
+    let (_, block_number) = get_pending_state(&mut connection).await;
+    drop(connection);
+
+    let rt_handle = Handle::current();
+    let connection_pool = connection_pool.clone();
+    let factory_deps = factory_deps.to_vec();
+    tokio::task::spawn_blocking(move || {
+        let connection = rt_handle.block_on(connection_pool.access_storage_tagged("api"));
+        let storage = PostgresStorage::new(rt_handle, connection, block_number, false)
+            .with_factory_deps_cache(factory_deps_cache);
+        let mut storage_view = StorageView::new(storage);
+
+        let effective_lengths = factory_deps.iter().map(|bytecode| {
+            if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) {
+                return 0;
+            }
+
+            let length = if let Ok(compressed) = compress_bytecode(bytecode) {
+                compressed.len()
+            } else {
+                bytecode.len()
+            };
+            length as u32 + PUBLISH_BYTECODE_OVERHEAD
+        });
+        effective_lengths.sum()
+    })
+    .await
+    .unwrap()
+}
+
+/// Arguments for VM execution not specific to a particular transaction.
+#[derive(Debug, Clone)]
+pub(crate) struct TxSharedArgs {
+    pub operator_account: AccountTreeId,
+    pub l1_gas_price: u64,
+    pub fair_l2_gas_price: u64,
+    pub base_system_contracts: BaseSystemContracts,
+    pub factory_deps_cache: FactoryDepsCache,
+}
+
+/// Information about a block provided to the VM.
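+///
+/// For a pending block (`BlockArgs::pending` below), `block_timestamp_s` is `None`;
+/// the wall-clock time is used instead when the VM needs a timestamp
+/// (see `BlockArgs::block_timestamp_seconds` in `apply.rs`).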
+#[derive(Debug, Clone, Copy)]
+pub(crate) struct BlockArgs {
+    block_id: api::BlockId,
+    resolved_block_number: MiniblockNumber,
+    block_timestamp_s: Option<u64>,
+}
+
+impl BlockArgs {
+    async fn pending(connection: &mut StorageProcessor<'_>) -> Self {
+        let (block_id, resolved_block_number) = get_pending_state(connection).await;
+        Self {
+            block_id,
+            resolved_block_number,
+            block_timestamp_s: None,
+        }
+    }
+
+    /// Loads block information from DB.
+    pub async fn new(
+        connection: &mut StorageProcessor<'_>,
+        block_id: api::BlockId,
+    ) -> Result<Option<Self>, SqlxError> {
+        let resolved_block_number = connection
+            .blocks_web3_dal()
+            .resolve_block_id(block_id)
+            .await?;
+        let Some(resolved_block_number) = resolved_block_number else { return Ok(None) };
+
+        let block_timestamp_s = connection
+            .blocks_web3_dal()
+            .get_block_timestamp(resolved_block_number)
+            .await?;
+        Ok(Some(Self {
+            block_id,
+            resolved_block_number,
+            block_timestamp_s,
+        }))
+    }
+}
diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs
new file mode 100644
index 000000000000..fe88cb42ce33
--- /dev/null
+++ b/core/bin/zksync_core/src/api_server/execution_sandbox/validate.rs
@@ -0,0 +1,142 @@
+use std::{
+    collections::{HashMap, HashSet},
+    time::Instant,
+};
+
+use vm::oracles::tracer::{ValidationError, ValidationTracerParams};
+use vm::vm_with_bootloader::push_transaction_to_bootloader_memory;
+use zksync_dal::{ConnectionPool, StorageProcessor};
+use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, U256};
+
+use super::{
+    adjust_l1_gas_price_for_tx, apply, BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit,
+};
+
+impl TxSharedArgs {
+    pub async fn validate_tx_with_pending_state(
+        mut self,
+        vm_permit: &VmPermit<'_>, // Proof that permit was acquired.
+        connection_pool: ConnectionPool,
+        tx: L2Tx,
+        computational_gas_limit: u32,
+    ) -> Result<(), ValidationError> {
+        let mut connection = connection_pool.access_storage_tagged("api").await;
+        let block_args = BlockArgs::pending(&mut connection).await;
+        drop(connection);
+        self.adjust_l1_gas_price(tx.common_data.fee.gas_per_pubdata_limit);
+        self.validate_tx_in_sandbox(
+            connection_pool,
+            vm_permit.rt_handle(),
+            tx,
+            block_args,
+            computational_gas_limit,
+        )
+        .await
+    }
+
+    // In order for validation to pass smoothly, we need to ensure that the block's required
+    // gasPerPubdata is <= the one in the transaction itself.
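+    // Worked example with made-up numbers: for `fair_l2_gas_price` = 250_000_000 wei and a
+    // transaction with `gas_per_pubdata_limit` = 800, the L1 gas price is capped at
+    // 250_000_000 * (800 - 1) / 17 = 11_750_000_000 wei, so the pubdata price derived
+    // from it stays within the transaction's limit.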
+    pub fn adjust_l1_gas_price(&mut self, gas_per_pubdata_limit: U256) {
+        self.l1_gas_price = adjust_l1_gas_price_for_tx(
+            self.l1_gas_price,
+            self.fair_l2_gas_price,
+            gas_per_pubdata_limit,
+        );
+    }
+
+    async fn validate_tx_in_sandbox(
+        self,
+        connection_pool: ConnectionPool,
+        rt_handle: tokio::runtime::Handle,
+        tx: L2Tx,
+        block_args: BlockArgs,
+        computational_gas_limit: u32,
+    ) -> Result<(), ValidationError> {
+        let stage_started_at = Instant::now();
+        let mut connection = connection_pool.access_storage_tagged("api").await;
+        let validation_params =
+            get_validation_params(&mut connection, &tx, computational_gas_limit).await;
+        drop(connection);
+
+        let execution_args = TxExecutionArgs::for_validation(&tx);
+        let execution_mode = execution_args.execution_mode;
+        let tx: Transaction = tx.into();
+        let (validation_result, _) = tokio::task::spawn_blocking(move || {
+            let span = tracing::debug_span!("validate_in_sandbox").entered();
+            let result = apply::apply_vm_in_sandbox(
+                rt_handle,
+                &self,
+                &execution_args,
+                &connection_pool,
+                tx,
+                block_args,
+                HashMap::new(),
+                |vm, tx| {
+                    let stage_started_at = Instant::now();
+                    let span = tracing::debug_span!("validation").entered();
+                    push_transaction_to_bootloader_memory(vm, &tx, execution_mode, None);
+                    let result = vm.execute_validation(validation_params);
+
+                    metrics::histogram!("api.web3.sandbox", stage_started_at.elapsed(), "stage" => "validation");
+                    span.exit();
+                    result
+                },
+            );
+            span.exit();
+            result
+        }).await.unwrap();
+
+        metrics::histogram!("server.api.validation_sandbox", stage_started_at.elapsed(), "stage" => "validate_in_sandbox");
+        validation_result
+    }
+}
+
+// Some slots can be marked as "trusted": in the general case they cannot be trusted not to
+// change between validation and execution, but in practice they change rarely enough that we
+// can safely rely on them.
+async fn get_validation_params(
+    connection: &mut StorageProcessor<'_>,
+    tx: &L2Tx,
+    computational_gas_limit: u32,
+) -> ValidationTracerParams {
+    let start_time = Instant::now();
+    let user_address = tx.common_data.initiator_address;
+    let paymaster_address = tx.common_data.paymaster_params.paymaster;
+
+    // This method assumes that the number of tokens is relatively low. When it grows,
+    // we may need to introduce some kind of caching.
+    let all_tokens = connection.tokens_dal().get_all_l2_token_addresses().await;
+    metrics::gauge!("api.execution.tokens.amount", all_tokens.len() as f64);
+
+    let span = tracing::debug_span!("compute_trusted_slots_for_validation").entered();
+    let trusted_slots: HashSet<_> = all_tokens
+        .iter()
+        .flat_map(|&token| TRUSTED_TOKEN_SLOTS.iter().map(move |&slot| (token, slot)))
+        .collect();
+
+    // We currently don't support any specific trusted addresses.
+    let trusted_addresses = HashSet::new();
+
+    // Slots whose values will be added as allowed addresses on the fly.
+    // Required for working with transparent proxies.
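+    // E.g., a transparent proxy keeps its implementation address in a storage slot;
+    // listing that slot here allows whatever address is currently stored in it,
+    // without marking the slot itself as trusted.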
+ let trusted_address_slots: HashSet<_> = all_tokens + .into_iter() + .flat_map(|token| TRUSTED_ADDRESS_SLOTS.iter().map(move |&slot| (token, slot))) + .collect(); + + metrics::gauge!( + "api.execution.trusted_address_slots.amount", + trusted_address_slots.len() as f64 + ); + span.exit(); + + metrics::histogram!("api.execution.get_validation_params", start_time.elapsed()); + ValidationTracerParams { + user_address, + paymaster_address, + trusted_slots, + trusted_addresses, + trusted_address_slots, + computational_gas_limit, + } +} diff --git a/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs b/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs new file mode 100644 index 000000000000..fc7e06cf632f --- /dev/null +++ b/core/bin/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs @@ -0,0 +1,191 @@ +//! Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs +//! or reporting the VM memory usage. + +use std::time::Duration; + +use vm::{HistoryMode, VmExecutionResult, VmInstance}; +use zksync_state::StorageViewMetrics; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + fee::TransactionExecutionMetrics, + storage_writes_deduplicator::StorageWritesDeduplicator, +}; +use zksync_utils::bytecode::bytecode_len_in_bytes; + +pub(super) fn report_storage_view_metrics( + tx_id: &str, + oracles_sizes: usize, + vm_execution_took: Duration, + metrics: StorageViewMetrics, +) { + metrics::histogram!( + "runtime_context.memory.storage_view_cache_size", + metrics.cache_size as f64 + ); + metrics::histogram!( + "runtime_context.memory", + (oracles_sizes + metrics.cache_size) as f64 + ); + + let total_storage_invocations = + metrics.get_value_storage_invocations + metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + metrics.time_spent_on_get_value + metrics.time_spent_on_set_value; + + metrics::histogram!( + "runtime_context.storage_interaction.amount", + metrics.storage_invocations_missed as f64, + "interaction" => "missed" + ); + metrics::histogram!( + "runtime_context.storage_interaction.amount", + metrics.get_value_storage_invocations as f64, + "interaction" => "get_value" + ); + metrics::histogram!( + "runtime_context.storage_interaction.amount", + metrics.set_value_storage_invocations as f64, + "interaction" => "set_value" + ); + metrics::histogram!( + "runtime_context.storage_interaction.amount", + total_storage_invocations as f64, + "interaction" => "total" + ); + + metrics::histogram!( + "runtime_context.storage_interaction.duration", + metrics.time_spent_on_storage_missed, + "interaction" => "missed" + ); + metrics::histogram!( + "runtime_context.storage_interaction.duration", + metrics.time_spent_on_get_value, + "interaction" => "get_value" + ); + metrics::histogram!( + "runtime_context.storage_interaction.duration", + metrics.time_spent_on_set_value, + "interaction" => "set_value" + ); + metrics::histogram!( + "runtime_context.storage_interaction.duration", + total_time_spent_in_storage, + "interaction" => "total" + ); + + if total_storage_invocations > 0 { + metrics::histogram!( + "runtime_context.storage_interaction.duration_per_unit", + total_time_spent_in_storage.div_f64(total_storage_invocations as f64), + "interaction" => "total" + ); + } + if metrics.storage_invocations_missed > 0 { + let duration_per_unit = metrics + .time_spent_on_storage_missed + .div_f64(metrics.storage_invocations_missed as f64); + metrics::histogram!( + 
"runtime_context.storage_interaction.duration_per_unit", + duration_per_unit, + "interaction" => "missed" + ); + } + + metrics::histogram!( + "runtime_context.storage_interaction.ratio", + total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64(), + ); + + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + vlog::info!( + "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + metrics.storage_invocations_missed, + metrics.get_value_storage_invocations, + metrics.set_value_storage_invocations, + metrics.time_spent_on_storage_missed, + metrics.time_spent_on_get_value, + metrics.time_spent_on_set_value, + ); + } +} + +pub(super) fn collect_tx_execution_metrics( + contracts_deployed: u16, + result: &VmExecutionResult, +) -> TransactionExecutionMetrics { + let event_topics = result + .events + .iter() + .map(|event| event.indexed_topics.len() as u16) + .sum(); + + let l2_l1_long_messages = extract_long_l2_to_l1_messages(&result.events) + .iter() + .map(|event| event.len()) + .sum(); + + let published_bytecode_bytes = extract_published_bytecodes(&result.events) + .iter() + .map(|bytecode_hash| bytecode_len_in_bytes(*bytecode_hash)) + .sum(); + + let writes_metrics = + StorageWritesDeduplicator::apply_on_empty_state(&result.storage_log_queries); + + TransactionExecutionMetrics { + initial_storage_writes: writes_metrics.initial_storage_writes, + repeated_storage_writes: writes_metrics.repeated_storage_writes, + gas_used: result.gas_used as usize, + event_topics, + published_bytecode_bytes, + l2_l1_long_messages, + l2_l1_logs: result.l2_to_l1_logs.len(), + contracts_used: result.contracts_used, + contracts_deployed, + vm_events: result.events.len(), + storage_logs: result.storage_log_queries.len(), + total_log_queries: result.total_log_queries, + cycles_used: result.cycles_used, + computational_gas_used: result.computational_gas_used, + } +} + +/// Returns the sum of all oracles' sizes. 
+pub(super) fn record_vm_memory_metrics<H: HistoryMode>(vm: &VmInstance<'_, H>) -> usize {
+    let event_sink_inner = vm.state.event_sink.get_size();
+    let event_sink_history = vm.state.event_sink.get_history_size();
+    let memory_inner = vm.state.memory.get_size();
+    let memory_history = vm.state.memory.get_history_size();
+    let decommittment_processor_inner = vm.state.decommittment_processor.get_size();
+    let decommittment_processor_history = vm.state.decommittment_processor.get_history_size();
+    let storage_inner = vm.state.storage.get_size();
+    let storage_history = vm.state.storage.get_history_size();
+
+    metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_inner as f64, "type" => "inner");
+    metrics::histogram!("runtime_context.memory.event_sink_size", event_sink_history as f64, "type" => "history");
+    metrics::histogram!("runtime_context.memory.memory_size", memory_inner as f64, "type" => "inner");
+    metrics::histogram!("runtime_context.memory.memory_size", memory_history as f64, "type" => "history");
+    metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_inner as f64, "type" => "inner");
+    metrics::histogram!("runtime_context.memory.decommitter_size", decommittment_processor_history as f64, "type" => "history");
+    metrics::histogram!("runtime_context.memory.storage_size", storage_inner as f64, "type" => "inner");
+    metrics::histogram!("runtime_context.memory.storage_size", storage_history as f64, "type" => "history");
+
+    [
+        event_sink_inner,
+        event_sink_history,
+        memory_inner,
+        memory_history,
+        decommittment_processor_inner,
+        decommittment_processor_history,
+        storage_inner,
+        storage_history,
+    ]
+    .iter()
+    .sum::<usize>()
+}
diff --git a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs
index 3df4ee9e6925..cde6ef551eb1 100644
--- a/core/bin/zksync_core/src/api_server/explorer/api_decl.rs
+++ b/core/bin/zksync_core/src/api_server/explorer/api_decl.rs
@@ -1,4 +1,4 @@
-use zksync_config::configs::api::Explorer as ExplorerApiConfig;
+use zksync_config::configs::api::ExplorerApiConfig;
 use zksync_dal::connection::ConnectionPool;
 use zksync_types::Address;
 
@@ -67,6 +67,14 @@ impl RestApi {
             "/contract_verification/solc_versions",
             web::get().to(Self::contract_verification_solc_versions),
         )
+        .route(
+            "/contract_verification/zkvyper_versions",
+            web::get().to(Self::contract_verification_zkvyper_versions),
+        )
+        .route(
+            "/contract_verification/vyper_versions",
+            web::get().to(Self::contract_verification_vyper_versions),
+        )
         .route(
             "/contract_verification/{id}",
             web::get().to(Self::contract_verification_request_status),
diff --git a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs
index 1f833f698225..9ac46c327fca 100644
--- a/core/bin/zksync_core/src/api_server/explorer/api_impl.rs
+++ b/core/bin/zksync_core/src/api_server/explorer/api_impl.rs
@@ -40,19 +40,22 @@ impl RestApi {
         let account_type = self_
             .replica_connection_pool
-            .access_storage_blocking()
+            .access_storage_tagged("api")
+            .await
             .explorer()
             .accounts_dal()
             .get_account_type(*address)
+            .await
             .unwrap();
 
         let response = match account_type {
             AccountType::EOA => ok_json(AddressDetails::Account(
-                self_.account_details_inner(address),
+                self_.account_details_inner(address).await,
             )),
             AccountType::Contract => {
                 // If account type is a contract, then `contract_details_inner` must return `Some`.
                let contract_details = self_
                    .contract_details_inner(address)
+                    .await
                    .expect("Failed to get contract info");
                ok_json(AddressDetails::Contract(contract_details))
            }
@@ -62,24 +65,30 @@ impl RestApi {
         response
     }
 
-    fn account_details_inner(&self, address: web::Path<Address>
) -> AccountDetails {
-        let mut storage = self.replica_connection_pool.access_storage_blocking();
+    async fn account_details_inner(&self, address: web::Path<Address>
) -> AccountDetails {
+        let mut storage = self
+            .replica_connection_pool
+            .access_storage_tagged("api")
+            .await;
         let balances = storage
             .explorer()
             .accounts_dal()
             .get_balances_for_address(*address)
+            .await
             .unwrap();
         let (sealed_nonce, verified_nonce) = storage
             .explorer()
             .accounts_dal()
             .get_account_nonces(*address)
+            .await
             .unwrap();
         let account_type = storage
             .explorer()
             .accounts_dal()
             .get_account_type(*address)
+            .await
             .unwrap();
 
         AccountDetails {
@@ -97,28 +106,34 @@ impl RestApi {
         address: web::Path<Address>,
    ) -> ActixResult<HttpResponse> {
        let start = Instant::now();
-        let account_details = self_.account_details_inner(address);
+        let account_details = self_.account_details_inner(address).await;
        metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "account_details");

        ok_json(account_details)
    }

-    fn contract_details_inner(&self, address: web::Path<Address>
) -> Option<ContractDetails> {
-        let mut storage = self.replica_connection_pool.access_storage_blocking();
+    async fn contract_details_inner(&self, address: web::Path<Address>
) -> Option { + let mut storage = self + .replica_connection_pool + .access_storage_tagged("api") + .await; let contract_info = storage .explorer() .misc_dal() .get_contract_info(*address) + .await .unwrap(); if let Some(contract_info) = contract_info { let contract_stats = storage .explorer() .misc_dal() .get_contract_stats(*address) + .await .unwrap(); let balances = storage .explorer() .accounts_dal() .get_balances_for_address(*address) + .await .unwrap(); Some(ContractDetails { info: contract_info, @@ -137,7 +152,7 @@ impl RestApi { ) -> ActixResult { let start = Instant::now(); - let response = match self_.contract_details_inner(address) { + let response = match self_.contract_details_inner(address).await { Some(contract_details) => ok_json(contract_details), None => Ok(HttpResponse::NotFound().finish()), }; @@ -194,12 +209,16 @@ impl RestApi { return Ok(res); } - let mut storage = self_.replica_connection_pool.access_storage_blocking(); + let mut storage = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await; if let Some(address) = query.address { match storage .explorer() .accounts_dal() .get_account_type(address) + .await .unwrap() { AccountType::EOA => query.account_address = Some(address), @@ -221,6 +240,7 @@ impl RestApi { self_.api_config.offset_limit(), self_.l2_erc20_bridge_addr, ) + .await .unwrap() } else { // If there is no filter by account address @@ -237,6 +257,7 @@ impl RestApi { self_.api_config.offset_limit(), self_.l2_erc20_bridge_addr, ) + .await .unwrap() }; @@ -266,10 +287,12 @@ impl RestApi { let tx_details = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .transactions_dal() .get_transaction_details(*hash, self_.l2_erc20_bridge_addr) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "transaction_details"); @@ -291,10 +314,12 @@ impl RestApi { let blocks = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .blocks_dal() .get_blocks_page(query, self_.network_stats.read().await.last_verified) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "block_pagination"); @@ -310,10 +335,12 @@ impl RestApi { let block_details = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .blocks_dal() .get_block_details(MiniblockNumber(*number), self_.fee_account_addr) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "block_details"); @@ -333,11 +360,15 @@ impl RestApi { return Ok(res); } let last_verified_miniblock = self_.network_stats.read().await.last_verified; - let mut storage = self_.replica_connection_pool.access_storage_blocking(); + let mut storage = self_ + .replica_connection_pool + .access_storage_tagged("api") + .await; let last_verified_l1_batch = storage .blocks_web3_dal() .get_l1_batch_number_of_miniblock(last_verified_miniblock) + .await .unwrap() .expect("Verified miniblock must be included in l1 batch"); @@ -345,6 +376,7 @@ impl RestApi { .explorer() .blocks_dal() .get_l1_batches_page(query, last_verified_l1_batch) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_pagination"); @@ -360,10 +392,12 @@ impl RestApi { let l1_batch_details = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .blocks_dal() 
.get_l1_batch_details(L1BatchNumber(*number)) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "l1_batch_details"); @@ -382,10 +416,12 @@ impl RestApi { let token_details = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .misc_dal() .get_token_details(*address) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "token_details"); @@ -395,6 +431,17 @@ impl RestApi { } } + #[tracing::instrument(skip(query))] + fn validate_contract_verification_query( + query: &VerificationIncomingRequest, + ) -> Result<(), HttpResponse> { + if query.source_code_data.compiler_type() != query.compiler_versions.compiler_type() { + return Err(HttpResponse::BadRequest().body("incorrect compiler versions")); + } + + Ok(()) + } + /// Add a contract verification job to the queue if the requested contract wasn't previously verified. #[tracing::instrument(skip(self_, request))] pub async fn contract_verification( @@ -402,12 +449,18 @@ impl RestApi { Json(request): Json, ) -> ActixResult { let start = Instant::now(); - - let mut storage = self_.master_connection_pool.access_storage_blocking(); + if let Err(res) = Self::validate_contract_verification_query(&request) { + return Ok(res); + } + let mut storage = self_ + .master_connection_pool + .access_storage_tagged("api") + .await; if !storage .storage_logs_dal() .is_contract_deployed_at_address(request.contract_address) + .await { return Ok( HttpResponse::BadRequest().body("There is no deployed contract on this address") @@ -417,6 +470,7 @@ impl RestApi { .explorer() .contract_verification_dal() .is_contract_verified(request.contract_address) + .await { return Ok(HttpResponse::BadRequest().body("This contract is already verified")); } @@ -425,6 +479,7 @@ impl RestApi { .explorer() .contract_verification_dal() .add_contract_verification_request(request) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification"); @@ -443,10 +498,12 @@ impl RestApi { let events = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .events_dal() .get_events_page(query, self_.api_config.offset_limit()) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "events_pagination"); @@ -463,10 +520,12 @@ impl RestApi { let status = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .contract_verification_dal() .get_verification_request_status(*id) + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_request_status"); @@ -484,10 +543,12 @@ impl RestApi { let versions = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .contract_verification_dal() .get_zksolc_versions() + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_zksolc_versions"); @@ -502,13 +563,55 @@ impl RestApi { let versions = self_ .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .contract_verification_dal() .get_solc_versions() + .await .unwrap(); metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_solc_versions"); ok_json(versions) } + + #[tracing::instrument(skip(self_))] + pub async fn 
contract_verification_zkvyper_versions(
+        self_: web::Data<Self>,
+    ) -> ActixResult<HttpResponse> {
+        let start = Instant::now();
+
+        let versions = self_
+            .replica_connection_pool
+            .access_storage_tagged("api")
+            .await
+            .explorer()
+            .contract_verification_dal()
+            .get_zkvyper_versions()
+            .await
+            .unwrap();
+
+        metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_zkvyper_versions");
+        ok_json(versions)
+    }
+
+    #[tracing::instrument(skip(self_))]
+    pub async fn contract_verification_vyper_versions(
+        self_: web::Data<Self>,
+    ) -> ActixResult<HttpResponse> {
+        let start = Instant::now();
+
+        let versions = self_
+            .replica_connection_pool
+            .access_storage_tagged("api")
+            .await
+            .explorer()
+            .contract_verification_dal()
+            .get_vyper_versions()
+            .await
+            .unwrap();
+
+        metrics::histogram!("api.explorer.call", start.elapsed(), "method" => "contract_verification_vyper_versions");
+        ok_json(versions)
+    }
 }
diff --git a/core/bin/zksync_core/src/api_server/explorer/mod.rs b/core/bin/zksync_core/src/api_server/explorer/mod.rs
index 95ef3e58290d..ec35078fb013 100644
--- a/core/bin/zksync_core/src/api_server/explorer/mod.rs
+++ b/core/bin/zksync_core/src/api_server/explorer/mod.rs
@@ -1,7 +1,7 @@
 use std::net::SocketAddr;
 use std::time::Duration;
 
-use zksync_config::configs::api::Explorer as ExplorerApiConfig;
+use zksync_config::configs::api::ExplorerApiConfig;
 use zksync_dal::connection::ConnectionPool;
 use zksync_types::Address;
 use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify};
diff --git a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs b/core/bin/zksync_core/src/api_server/explorer/network_stats.rs
index 3bad1f26a60f..dcda339b2703 100644
--- a/core/bin/zksync_core/src/api_server/explorer/network_stats.rs
+++ b/core/bin/zksync_core/src/api_server/explorer/network_stats.rs
@@ -49,15 +49,17 @@ impl SharedNetworkStats {
                 timer.tick().await;
 
-                let mut storage = connection_pool.access_storage_blocking();
+                let mut storage = connection_pool.access_storage_tagged("api").await;
 
                 let last_sealed = storage
                     .blocks_web3_dal()
                     .get_sealed_miniblock_number()
+                    .await
                     .unwrap();
                 let last_verified = storage
                     .blocks_web3_dal()
                     .resolve_block_id(api::BlockId::Number(api::BlockNumber::Finalized))
+                    .await
                     .unwrap()
                     .unwrap_or(MiniblockNumber(0));
                 let prev_stats = self.read().await;
@@ -65,6 +67,7 @@ impl SharedNetworkStats {
                     .explorer()
                     .transactions_dal()
                     .get_transactions_count_between(prev_stats.last_sealed + 1, last_sealed)
+                    .await
                     .unwrap();
 
                 let stats = NetworkStats {
diff --git a/core/bin/zksync_core/src/api_server/healthcheck.rs b/core/bin/zksync_core/src/api_server/healthcheck.rs
index 370bfa1efbee..73d958d97c60 100644
--- a/core/bin/zksync_core/src/api_server/healthcheck.rs
+++ b/core/bin/zksync_core/src/api_server/healthcheck.rs
@@ -14,7 +14,7 @@ pub struct Response {
 #[get("/health")]
 async fn healthcheck(healthchecks: web::Data<[Box<dyn CheckHealth>]>) -> impl Responder {
     for healthcheck in healthchecks.iter() {
-        match healthcheck.check_health() {
+        match healthcheck.check_health().await {
             CheckHealthStatus::NotReady(message) => {
                 let response = Response { message };
                 return HttpResponse::ServiceUnavailable().json(response);
@@ -38,13 +38,25 @@ fn run_server(bind_address: SocketAddr, healthchecks: Vec<Box<dyn CheckHealth>>)
         .run()
 }
 
+pub struct HealthCheckHandle {
+    server: tokio::task::JoinHandle<()>,
+    stop_sender: watch::Sender<bool>,
+}
+
+impl HealthCheckHandle {
+    pub async fn stop(self) {
+        self.stop_sender.send(true).ok();
+        self.server.await.unwrap();
+    }
+}
+
 /// Start HTTP healthcheck API
 pub fn start_server_thread_detached(
     addr: SocketAddr,
     healthchecks: Vec<Box<dyn CheckHealth>>,
-    mut stop_receiver: watch::Receiver<bool>,
-) -> tokio::task::JoinHandle<()> {
+) -> HealthCheckHandle {
     let (handler, panic_sender) = spawn_panic_handler();
+    let (stop_sender, mut stop_receiver) = watch::channel(false);
     std::thread::Builder::new()
         .name("healthcheck".to_string())
         .spawn(move || {
@@ -64,5 +76,8 @@ pub fn start_server_thread_detached(
         })
         .expect("Failed to spawn thread for REST API");
 
-    handler
+    HealthCheckHandle {
+        server: handler,
+        stop_sender,
+    }
 }
diff --git a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs
index 5058d93f0d8b..88c0ed8bee90 100644
--- a/core/bin/zksync_core/src/api_server/tx_sender/mod.rs
+++ b/core/bin/zksync_core/src/api_server/tx_sender/mod.rs
@@ -1,63 +1,54 @@
 //! Helper module to submit transactions into the zkSync Network.
-// Built-in uses
-use std::{cmp::min, num::NonZeroU32, sync::Arc, time::Instant};
 // External uses
-use governor::clock::MonotonicClock;
-use governor::middleware::NoOpMiddleware;
-use governor::state::{InMemoryState, NotKeyed};
-use governor::{Quota, RateLimiter};
-
-use vm::vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, TxExecutionMode};
-use vm::zk_evm::zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK;
-use zksync_config::configs::chain::StateKeeperConfig;
+use governor::{
+    clock::MonotonicClock,
+    middleware::NoOpMiddleware,
+    state::{InMemoryState, NotKeyed},
+    Quota, RateLimiter,
+};
+
+// Built-in uses
+use std::{cmp, collections::HashMap, num::NonZeroU32, sync::Arc, time::Instant};
+
+// Workspace uses
+use vm::{
+    transaction_data::{derive_overhead, OverheadCoeficients},
+    vm_with_bootloader::derive_base_fee_and_gas_per_pubdata,
+    zk_evm::zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK,
+    VmExecutionResult,
+};
+use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig};
 use zksync_contracts::{
     BaseSystemContracts, SystemContractCode, ESTIMATE_FEE_BLOCK_CODE,
     PLAYGROUND_BLOCK_BOOTLOADER_CODE,
 };
-use zksync_dal::transactions_dal::L2TxSubmissionResult;
-
-use vm::transaction_data::TransactionData;
-use zksync_config::ZkSyncConfig;
-use zksync_types::fee::TransactionExecutionMetrics;
-
+use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool};
+use zksync_state::FactoryDepsCache;
 use zksync_types::{
-    ExecuteTransactionCommon, Transaction, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT,
-    MAX_NEW_FACTORY_DEPS,
-};
-
-use zksync_dal::ConnectionPool;
-
-use zksync_types::{
-    api,
-    fee::Fee,
+    fee::{Fee, TransactionExecutionMetrics},
     get_code_key, get_intrinsic_constants,
     l2::error::TxCheckError::TxDuplication,
     l2::L2Tx,
-    tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics},
     utils::storage_key_for_eth_balance,
-    AccountTreeId, Address, Nonce, H160, H256, U256,
+    AccountTreeId, Address, ExecuteTransactionCommon, Nonce, StorageKey, Transaction, H160, H256,
+    MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256,
 };
-
 use zksync_utils::{bytes_to_be_words, h256_to_u256};
 
 // Local uses
 use crate::api_server::execution_sandbox::{
-    adjust_l1_gas_price_for_tx, execute_tx_with_pending_state, get_pubdata_for_factory_deps,
-    validate_tx_with_pending_state, SandboxExecutionError,
+    adjust_l1_gas_price_for_tx, execute_tx_eth_call, execute_tx_with_pending_state,
+    get_pubdata_for_factory_deps, BlockArgs, SandboxExecutionError, TxExecutionArgs, TxSharedArgs,
+    VmConcurrencyLimiter, VmPermit,
 };
-
-use crate::gas_tracker::{gas_count_from_tx_and_metrics, gas_count_from_writes};
 use crate::l1_gas_price::L1GasPriceProvider;
-use crate::state_keeper::seal_criteria::conditional_sealer::ConditionalSealer;
-use crate::state_keeper::seal_criteria::SealResolution;
+use crate::state_keeper::seal_criteria::{ConditionalSealer, SealData};
 
-pub mod error;
-pub use error::SubmitTxError;
-use vm::transaction_data::{derive_overhead, OverheadCoeficients};
+mod error;
+mod proxy;
 
-pub mod proxy;
-pub use proxy::TxProxy;
+pub(super) use self::{error::SubmitTxError, proxy::TxProxy};
 
 /// Type alias for the rate limiter implementation.
 type TxSenderRateLimiter =
     RateLimiter<NotKeyed, InMemoryState, MonotonicClock, NoOpMiddleware<Instant>>;
@@ -104,7 +95,7 @@ impl TxSenderBuilder {
         }
     }
 
-    pub fn with_tx_proxy(mut self, main_node_url: String) -> Self {
+    pub fn with_tx_proxy(mut self, main_node_url: &str) -> Self {
         self.proxy = Some(TxProxy::new(main_node_url));
         self
     }
@@ -119,20 +110,26 @@ impl TxSenderBuilder {
         self
     }
 
-    pub fn build<G: L1GasPriceProvider>(
+    pub async fn build<G: L1GasPriceProvider>(
         self,
         l1_gas_price_source: Arc<G>,
         default_aa_hash: H256,
+        vm_concurrency_limiter: Arc<VmConcurrencyLimiter>,
+        factory_deps_cache: FactoryDepsCache,
     ) -> TxSender<G> {
         assert!(
             self.master_connection_pool.is_some() || self.proxy.is_some(),
             "Either master connection pool or proxy must be set"
         );
 
-        let mut storage = self.replica_connection_pool.access_storage_blocking();
+        let mut storage = self
+            .replica_connection_pool
+            .access_storage_tagged("api")
+            .await;
         let default_aa_bytecode = storage
            .storage_dal()
            .get_factory_dep(default_aa_hash)
+            .await
            .expect("Default AA hash must be present in the database");
        drop(storage);

@@ -161,6 +158,8 @@ impl TxSenderBuilder {
             rate_limiter: self.rate_limiter,
             proxy: self.proxy,
             state_keeper_config: self.state_keeper_config,
+            vm_concurrency_limiter,
+            factory_deps_cache,
         }))
     }
 }
@@ -169,7 +168,7 @@ impl TxSenderBuilder {
 
 /// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc)
 /// may require different configuration layouts.
 /// The intention is to only keep the actually used information here.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct TxSenderConfig {
     pub fee_account_addr: Address,
     pub gas_price_scale_factor: f64,
@@ -178,24 +177,26 @@ pub struct TxSenderConfig {
     pub fair_l2_gas_price: u64,
     pub vm_execution_cache_misses_limit: Option<usize>,
     pub validation_computational_gas_limit: u32,
+    pub default_aa: H256,
+    pub bootloader: H256,
 }
 
-impl From<ZkSyncConfig> for TxSenderConfig {
-    fn from(config: ZkSyncConfig) -> Self {
+impl TxSenderConfig {
+    pub fn new(
+        state_keeper_config: &StateKeeperConfig,
+        web3_json_config: &Web3JsonRpcConfig,
+    ) -> Self {
         Self {
-            fee_account_addr: config.chain.state_keeper.fee_account_addr,
-            gas_price_scale_factor: config.api.web3_json_rpc.gas_price_scale_factor,
-            max_nonce_ahead: config.api.web3_json_rpc.max_nonce_ahead,
-            max_allowed_l2_tx_gas_limit: config.chain.state_keeper.max_allowed_l2_tx_gas_limit,
-            fair_l2_gas_price: config.chain.state_keeper.fair_l2_gas_price,
-            vm_execution_cache_misses_limit: config
-                .api
-                .web3_json_rpc
-                .vm_execution_cache_misses_limit,
-            validation_computational_gas_limit: config
-                .chain
-                .state_keeper
+            fee_account_addr: state_keeper_config.fee_account_addr,
+            gas_price_scale_factor: web3_json_config.gas_price_scale_factor,
+            max_nonce_ahead: web3_json_config.max_nonce_ahead,
+            max_allowed_l2_tx_gas_limit: state_keeper_config.max_allowed_l2_tx_gas_limit,
+            fair_l2_gas_price: state_keeper_config.fair_l2_gas_price,
+            vm_execution_cache_misses_limit: web3_json_config.vm_execution_cache_misses_limit,
+            validation_computational_gas_limit: state_keeper_config
                .validation_computational_gas_limit,
+            default_aa: state_keeper_config.default_aa_hash,
+            bootloader: state_keeper_config.bootloader_hash,
         }
     }
 }
@@ -216,6 +217,10 @@ pub struct TxSenderInner<G: L1GasPriceProvider> {
     /// This field may be omitted on the external node, since the configuration may change unexpectedly.
     /// If this field is set to `None`, `TxSender` will assume that any transaction is executable.
     state_keeper_config: Option<StateKeeperConfig>,
+    /// Used to limit the number of VMs that can be executed simultaneously.
+    pub(super) vm_concurrency_limiter: Arc<VmConcurrencyLimiter>,
+    // Smart contract bytecode (factory deps) cache.
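+    // Shared with the API's execution sandbox via `TxSharedArgs`, so repeated VM runs
+    // don't re-fetch the same bytecodes from Postgres.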
+    pub(super) factory_deps_cache: FactoryDepsCache,
 }
 
 pub struct TxSender<G>(pub Arc<TxSenderInner<G>>);
 
@@ -237,101 +242,29 @@ impl<G: L1GasPriceProvider> std::fmt::Debug for TxSender<G> {
 impl<G: L1GasPriceProvider> TxSender<G> {
     #[tracing::instrument(skip(self, tx))]
-    pub fn submit_tx(&self, tx: L2Tx) -> Result<L2TxSubmissionResult, SubmitTxError> {
+    pub async fn submit_tx(&self, tx: L2Tx) -> Result<L2TxSubmissionResult, SubmitTxError> {
         if let Some(rate_limiter) = &self.0.rate_limiter {
             if rate_limiter.check().is_err() {
                 return Err(SubmitTxError::RateLimitExceeded);
             }
         }
-        let mut stage_started_at = Instant::now();
-
-        if tx.common_data.fee.gas_limit > U256::from(u32::MAX)
-            || tx.common_data.fee.gas_per_pubdata_limit > U256::from(u32::MAX)
-        {
-            return Err(SubmitTxError::GasLimitIsTooBig);
-        }
-
-        let _maximal_allowed_overhead = 0;
-
-        if tx.common_data.fee.gas_limit
-            > U256::from(self.0.sender_config.max_allowed_l2_tx_gas_limit)
-        {
-            vlog::info!(
-                "Submitted Tx is Unexecutable {:?} because of GasLimitIsTooBig {}",
-                tx.hash(),
-                tx.common_data.fee.gas_limit,
-            );
-            return Err(SubmitTxError::GasLimitIsTooBig);
-        }
-        if tx.common_data.fee.max_fee_per_gas < self.0.sender_config.fair_l2_gas_price.into() {
-            vlog::info!(
-                "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}",
-                tx.hash(),
-                tx.common_data.fee.max_fee_per_gas
-            );
-            return Err(SubmitTxError::MaxFeePerGasTooLow);
-        }
-        if tx.common_data.fee.max_fee_per_gas < tx.common_data.fee.max_priority_fee_per_gas {
-            vlog::info!(
-                "Submitted Tx is Unexecutable {:?} because of MaxPriorityFeeGreaterThanMaxFee {}",
-                tx.hash(),
-                tx.common_data.fee.max_fee_per_gas
-            );
-            return Err(SubmitTxError::MaxPriorityFeeGreaterThanMaxFee);
-        }
-        if tx.execute.factory_deps_length() > MAX_NEW_FACTORY_DEPS {
-            return Err(SubmitTxError::TooManyFactoryDependencies(
-                tx.execute.factory_deps_length(),
-                MAX_NEW_FACTORY_DEPS,
-            ));
-        }
-
-        let l1_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price();
-
-        let (_, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata(
-            l1_gas_price,
-            self.0.sender_config.fair_l2_gas_price,
-        );
-
-        let intrinsic_constants = get_intrinsic_constants();
-        if tx.common_data.fee.gas_limit
-            < U256::from(intrinsic_constants.l2_tx_intrinsic_gas)
-                + U256::from(intrinsic_constants.l2_tx_intrinsic_pubdata)
-                    * min(
-                        U256::from(gas_per_pubdata_byte),
-                        tx.common_data.fee.gas_per_pubdata_limit,
-                    )
-        {
-            return Err(SubmitTxError::IntrinsicGas);
-        }
-
-        // We still double-check the nonce manually
-        // to make sure that only the correct nonce is submitted and the transaction's hashes never repeat
-        self.validate_account_nonce(&tx)?;
-
-        // Even though without enough balance the tx will not pass anyway
-        // we check the user for enough balance explicitly here for better DevEx.
-        self.validate_enough_balance(&tx)?;
+        let mut stage_started_at = Instant::now();
+        self.validate_tx(&tx).await?;
         metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "1_validate");
         stage_started_at = Instant::now();
 
-        let l1_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price();
-        let fair_l2_gas_price = self.0.sender_config.fair_l2_gas_price;
-
-        let (tx_metrics, _) = execute_tx_with_pending_state(
-            &self.0.replica_connection_pool,
+        let shared_args = self.shared_args();
+        let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
+        let (_, tx_metrics) = execute_tx_with_pending_state(
+            &vm_permit,
+            shared_args.clone(),
+            TxExecutionArgs::for_validation(&tx),
+            self.0.replica_connection_pool.clone(),
             tx.clone().into(),
-            AccountTreeId::new(self.0.sender_config.fee_account_addr),
-            TxExecutionMode::VerifyExecute,
-            Some(tx.nonce()),
-            U256::zero(),
-            l1_gas_price,
-            fair_l2_gas_price,
-            Some(tx.common_data.fee.max_fee_per_gas.as_u64()),
-            &self.0.playground_base_system_contracts,
-            &mut Default::default(),
-        );
+            &mut HashMap::new(),
+        )
+        .await;
 
         vlog::info!(
             "Submit tx {:?} with execution metrics {:?}",
@@ -341,19 +274,16 @@
         metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "2_dry_run");
         stage_started_at = Instant::now();
 
-        let validation_result = validate_tx_with_pending_state(
-            &self.0.replica_connection_pool,
-            tx.clone(),
-            AccountTreeId::new(self.0.sender_config.fee_account_addr),
-            TxExecutionMode::VerifyExecute,
-            Some(tx.nonce()),
-            U256::zero(),
-            l1_gas_price,
-            fair_l2_gas_price,
-            Some(tx.common_data.fee.max_fee_per_gas.as_u64()),
-            &self.0.playground_base_system_contracts,
-            self.0.sender_config.validation_computational_gas_limit,
-        );
+        let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit;
+        let validation_result = shared_args
+            .validate_tx_with_pending_state(
+                &vm_permit,
+                self.0.replica_connection_pool.clone(),
+                tx.clone(),
+                computational_gas_limit,
+            )
+            .await;
+        drop(vm_permit); // Unblock other VMs.
 
         metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "3_verify_execute");
         stage_started_at = Instant::now();
@@ -362,18 +292,18 @@
             return Err(err.into());
         }
 
-        self.ensure_tx_executable(&tx.clone().into(), &tx_metrics, true)?;
+        self.ensure_tx_executable(tx.clone().into(), &tx_metrics, true)?;
 
         if let Some(proxy) = &self.0.proxy {
             // We're running an external node: we have to proxy the transaction to the main node.
             // But before we do that, save the tx to cache in case someone requests it
             // before it reaches the main node.
-            proxy.save_tx(tx.hash(), tx.clone());
-            proxy.submit_tx(&tx)?;
+            proxy.save_tx(tx.hash(), tx.clone()).await;
+            proxy.submit_tx(&tx).await?;
             // Now, after we are sure that the tx is on the main node, remove it from cache
             // since we don't want to store txs that might have been replaced or otherwise removed
             // from the mempool.
- proxy.forget_tx(tx.hash()); + proxy.forget_tx(tx.hash()).await; metrics::histogram!("api.web3.submit_tx", stage_started_at.elapsed(), "stage" => "4_tx_proxy"); metrics::counter!("server.processed_txs", 1, "stage" => "proxied"); return Ok(L2TxSubmissionResult::Proxied); @@ -386,15 +316,17 @@ impl TxSender { let nonce = tx.common_data.nonce.0; let hash = tx.hash(); - let expected_nonce = self.get_expected_nonce(&tx); + let expected_nonce = self.get_expected_nonce(&tx).await; let submission_res_handle = self .0 .master_connection_pool .as_ref() .unwrap() // Checked above - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_dal() - .insert_transaction_l2(tx, tx_metrics); + .insert_transaction_l2(tx, tx_metrics) + .await; let status: String; let submission_result = match submission_res_handle { @@ -429,8 +361,83 @@ impl TxSender { submission_result } - fn validate_account_nonce(&self, tx: &L2Tx) -> Result<(), SubmitTxError> { - let expected_nonce = self.get_expected_nonce(tx); + fn shared_args(&self) -> TxSharedArgs { + TxSharedArgs { + operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), + l1_gas_price: self.0.l1_gas_price_source.estimate_effective_gas_price(), + fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, + base_system_contracts: self.0.playground_base_system_contracts.clone(), + factory_deps_cache: self.0.factory_deps_cache.clone(), + } + } + + async fn validate_tx(&self, tx: &L2Tx) -> Result<(), SubmitTxError> { + let max_gas = U256::from(u32::MAX); + if tx.common_data.fee.gas_limit > max_gas + || tx.common_data.fee.gas_per_pubdata_limit > max_gas + { + return Err(SubmitTxError::GasLimitIsTooBig); + } + + if tx.common_data.fee.gas_limit > self.0.sender_config.max_allowed_l2_tx_gas_limit.into() { + vlog::info!( + "Submitted Tx is Unexecutable {:?} because of GasLimitIsTooBig {}", + tx.hash(), + tx.common_data.fee.gas_limit, + ); + return Err(SubmitTxError::GasLimitIsTooBig); + } + if tx.common_data.fee.max_fee_per_gas < self.0.sender_config.fair_l2_gas_price.into() { + vlog::info!( + "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}", + tx.hash(), + tx.common_data.fee.max_fee_per_gas + ); + return Err(SubmitTxError::MaxFeePerGasTooLow); + } + if tx.common_data.fee.max_fee_per_gas < tx.common_data.fee.max_priority_fee_per_gas { + vlog::info!( + "Submitted Tx is Unexecutable {:?} because of MaxPriorityFeeGreaterThanMaxFee {}", + tx.hash(), + tx.common_data.fee.max_fee_per_gas + ); + return Err(SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } + if tx.execute.factory_deps_length() > MAX_NEW_FACTORY_DEPS { + return Err(SubmitTxError::TooManyFactoryDependencies( + tx.execute.factory_deps_length(), + MAX_NEW_FACTORY_DEPS, + )); + } + + let l1_gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); + let (_, gas_per_pubdata_byte) = derive_base_fee_and_gas_per_pubdata( + l1_gas_price, + self.0.sender_config.fair_l2_gas_price, + ); + let effective_gas_per_pubdata = cmp::min( + tx.common_data.fee.gas_per_pubdata_limit, + gas_per_pubdata_byte.into(), + ); + + let intrinsic_consts = get_intrinsic_constants(); + let min_gas_limit = U256::from(intrinsic_consts.l2_tx_intrinsic_gas) + + U256::from(intrinsic_consts.l2_tx_intrinsic_pubdata) * effective_gas_per_pubdata; + if tx.common_data.fee.gas_limit < min_gas_limit { + return Err(SubmitTxError::IntrinsicGas); + } + + // We still double-check the nonce manually + // to make sure that only the correct nonce is submitted and the transaction's hashes 
never repeat
+        self.validate_account_nonce(tx).await?;
+        // Even though a tx with insufficient balance would fail anyway,
+        // we check the balance explicitly here for better DevEx.
+        self.validate_enough_balance(tx).await?;
+        Ok(())
+    }
+
+    async fn validate_account_nonce(&self, tx: &L2Tx) -> Result<(), SubmitTxError> {
+        let expected_nonce = self.get_expected_nonce(tx).await;

         if tx.common_data.nonce.0 < expected_nonce.0 {
             Err(SubmitTxError::NonceIsTooLow(
@@ -438,34 +445,41 @@ impl TxSender {
                 expected_nonce.0 + self.0.sender_config.max_nonce_ahead,
                 tx.nonce().0,
             ))
-        } else if !(expected_nonce.0..=(expected_nonce.0 + self.0.sender_config.max_nonce_ahead))
-            .contains(&tx.common_data.nonce.0)
-        {
-            Err(SubmitTxError::NonceIsTooHigh(
-                expected_nonce.0,
-                expected_nonce.0 + self.0.sender_config.max_nonce_ahead,
-                tx.nonce().0,
-            ))
         } else {
-            Ok(())
+            let max_nonce = expected_nonce.0 + self.0.sender_config.max_nonce_ahead;
+            if !(expected_nonce.0..=max_nonce).contains(&tx.common_data.nonce.0) {
+                Err(SubmitTxError::NonceIsTooHigh(
+                    expected_nonce.0,
+                    max_nonce,
+                    tx.nonce().0,
+                ))
+            } else {
+                Ok(())
+            }
         }
     }

-    fn get_expected_nonce(&self, tx: &L2Tx) -> Nonce {
-        self.0
+    async fn get_expected_nonce(&self, tx: &L2Tx) -> Nonce {
+        let mut connection = self
+            .0
             .replica_connection_pool
-            .access_storage_blocking()
+            .access_storage_tagged("api")
+            .await;
+
+        let latest_block_number = connection
+            .blocks_web3_dal()
+            .get_sealed_miniblock_number()
+            .await
+            .unwrap();
+        let nonce = connection
             .storage_web3_dal()
-            .get_address_historical_nonce(
-                tx.initiator_account(),
-                api::BlockId::Number(api::BlockNumber::Latest),
-            )
-            .unwrap()
-            .map(|n| Nonce(n.as_u32()))
-            .unwrap()
+            .get_address_historical_nonce(tx.initiator_account(), latest_block_number)
+            .await
+            .unwrap();
+        Nonce(nonce.as_u32())
     }

-    fn validate_enough_balance(&self, tx: &L2Tx) -> Result<(), SubmitTxError> {
+    async fn validate_enough_balance(&self, tx: &L2Tx) -> Result<(), SubmitTxError> {
         let paymaster = tx.common_data.paymaster_params.paymaster;

         // The paymaster is expected to pay for the tx,
@@ -474,10 +488,10 @@ impl TxSender {
             return Ok(());
         }

-        let balance = self.get_balance(&tx.common_data.initiator_address);
+        let balance = self.get_balance(&tx.common_data.initiator_address).await;

         // Estimate the minimum fee price user will agree to.
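        // (This mirrors the EIP-1559 effective price: the sender pays at most
        // `max_fee_per_gas`, and otherwise the fair L2 base price plus their tip.)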
- let gas_price = std::cmp::min( + let gas_price = cmp::min( tx.common_data.fee.max_fee_per_gas, U256::from(self.0.sender_config.fair_l2_gas_price) + tx.common_data.fee.max_priority_fee_per_gas, @@ -496,21 +510,87 @@ impl TxSender { } } - fn get_balance(&self, initiator_address: &H160) -> U256 { + async fn get_balance(&self, initiator_address: &H160) -> U256 { let eth_balance_key = storage_key_for_eth_balance(initiator_address); let balance = self .0 .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .storage_dal() .get_by_key(ð_balance_key) + .await .unwrap_or_default(); h256_to_u256(balance) } - pub fn get_txs_fee_in_wei( + /// Given the gas_limit to be used for the body of the transaction, + /// returns the result for executing the transaction with such gas_limit + #[allow(clippy::too_many_arguments)] + async fn estimate_gas_step( + &self, + vm_permit: &VmPermit<'_>, + mut tx: Transaction, + gas_per_pubdata_byte: u64, + tx_gas_limit: u32, + l1_gas_price: u64, + base_fee: u64, + storage_read_cache: &mut HashMap, + ) -> Result { + let gas_limit_with_overhead = tx_gas_limit + + derive_overhead( + tx_gas_limit, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + ); + + match &mut tx.common_data { + ExecuteTransactionCommon::L1(l1_common_data) => { + l1_common_data.gas_limit = gas_limit_with_overhead.into(); + let required_funds = + l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value; + l1_common_data.to_mint = required_funds; + } + ExecuteTransactionCommon::L2(l2_common_data) => { + l2_common_data.fee.gas_limit = gas_limit_with_overhead.into(); + } + } + + let shared_args = self.shared_args_for_gas_estimate(l1_gas_price); + let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit; + let execution_args = + TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee); + let (exec_result, tx_metrics) = execute_tx_with_pending_state( + vm_permit, + shared_args, + execution_args, + self.0.replica_connection_pool.clone(), + tx.clone(), + storage_read_cache, + ) + .await; + + if let Err(err) = self.ensure_tx_executable(tx, &tx_metrics, false) { + let SubmitTxError::Unexecutable(message) = err else { unreachable!() }; + return Err(SandboxExecutionError::Unexecutable(message)); + } + exec_result + } + + fn shared_args_for_gas_estimate(&self, l1_gas_price: u64) -> TxSharedArgs { + TxSharedArgs { + operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr), + l1_gas_price, + fair_l2_gas_price: self.0.sender_config.fair_l2_gas_price, + base_system_contracts: self.0.estimate_fee_base_system_contracts.clone(), + factory_deps_cache: self.0.factory_deps_cache.clone(), + } + } + + pub async fn get_txs_fee_in_wei( &self, mut tx: Transaction, estimated_fee_scale_factor: f64, @@ -552,14 +632,16 @@ impl TxSender { let account_code_hash = self .0 .replica_connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .storage_dal() .get_by_key(&hashed_key) + .await .unwrap_or_default(); if !tx.is_l1() && account_code_hash == H256::zero() - && tx.execute.value > self.get_balance(&tx.initiator_account()) + && tx.execute.value > self.get_balance(&tx.initiator_account()).await { vlog::info!( "fee estimation failed on validation step. 
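The hunks below replace the inline `execute` closure with the new `estimate_gas_step`
helper, driven by a plain binary search over the gas limit. For reference, here is a
minimal self-contained sketch of that search strategy (illustrative names only;
`try_execute` stands in for `estimate_gas_step`, and success is assumed to be monotone
in the gas limit):

    // Narrows [lower_bound, upper_bound] until the window is within the
    // acceptable overestimation; `upper_bound` always holds a succeeding limit.
    fn estimate_gas_limit(
        mut lower_bound: u32,
        mut upper_bound: u32,
        acceptable_overestimation: u32,
        mut try_execute: impl FnMut(u32) -> Result<(), ()>,
    ) -> u32 {
        while lower_bound + acceptable_overestimation < upper_bound {
            let mid = (lower_bound + upper_bound) / 2;
            if try_execute(mid).is_err() {
                // Treat any failure as "not enough gas" and search higher.
                lower_bound = mid + 1;
            } else {
                upper_bound = mid;
            }
        }
        upper_bound
    }

    fn main() {
        // Toy predicate: the tx "succeeds" with at least 21_000 gas.
        let estimate = estimate_gas_limit(0, 1 << 25, 16, |gas| {
            if gas >= 21_000 { Ok(()) } else { Err(()) }
        });
        assert!((21_000..=21_016).contains(&estimate));
        println!("estimated gas limit: {estimate}");
    }

The number of VM executions is therefore logarithmic in the search range, which is why
the code below acquires a single VM permit for the whole search instead of one per probe.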
@@ -588,8 +670,10 @@ impl TxSender {
         } else {
             let pubdata_for_factory_deps = get_pubdata_for_factory_deps(
                 &self.0.replica_connection_pool,
-                &tx.execute.factory_deps,
-            );
+                tx.execute.factory_deps.as_deref().unwrap_or_default(),
+                self.0.factory_deps_cache.clone(),
+            )
+            .await;
             if pubdata_for_factory_deps > MAX_PUBDATA_PER_BLOCK {
                 return Err(SubmitTxError::Unexecutable(
                     "exceeds limit for published pubdata".to_string(),
@@ -599,7 +683,7 @@ impl TxSender {
         };

         // Rolling cache with storage values that were read from the DB.
-        let mut storage_read_cache = Default::default();
+        let mut storage_read_cache = HashMap::new();

         // We are using binary search to find the minimal value of gas_limit under which
         // the transaction succeeds
@@ -615,75 +699,9 @@ impl TxSender {
             tx_id,
             estimation_started_at.elapsed(),
         );
-        // Given the gas_limit to be used for the body of the transaction,
-        // returns the result for executing the transaction with such gas_limit
-        let mut execute = |tx_gas_limit: u32| {
-            let gas_limit_with_overhead = tx_gas_limit
-                + derive_overhead(
-                    tx_gas_limit,
-                    gas_per_pubdata_byte as u32,
-                    tx.encoding_len(),
-                    OverheadCoeficients::from_tx_type(tx.tx_format() as u8),
-                );
-
-            match &mut tx.common_data {
-                ExecuteTransactionCommon::L1(l1_common_data) => {
-                    l1_common_data.gas_limit = gas_limit_with_overhead.into();
-
-                    let required_funds = l1_common_data.gas_limit * l1_common_data.max_fee_per_gas
-                        + tx.execute.value;
-
-                    l1_common_data.to_mint = required_funds;
-                }
-                ExecuteTransactionCommon::L2(l2_common_data) => {
-                    l2_common_data.fee.gas_limit = gas_limit_with_overhead.into();
-                }
-            }
-
-            let enforced_nonce = match &tx.common_data {
-                ExecuteTransactionCommon::L2(data) => Some(data.nonce),
-                _ => None,
-            };
-
-            // For L2 transactions we need to explicitly put enough balance into the account of the users
-            // while for L1->L2 transactions the `to_mint` field plays this role
-            let added_balance = match &tx.common_data {
-                ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
-                _ => U256::zero(),
-            };
-
-            let (tx_metrics, exec_result) = execute_tx_with_pending_state(
-                &self.0.replica_connection_pool,
-                tx.clone(),
-                AccountTreeId::new(self.0.sender_config.fee_account_addr),
-                TxExecutionMode::EstimateFee {
-                    missed_storage_invocation_limit: self
-                        .0
-                        .sender_config
-                        .vm_execution_cache_misses_limit
-                        .unwrap_or(usize::MAX),
-                },
-                enforced_nonce,
-                added_balance,
-                l1_gas_price,
-                self.0.sender_config.fair_l2_gas_price,
-                Some(base_fee),
-                &self.0.estimate_fee_base_system_contracts,
-                &mut storage_read_cache,
-            );
-
-            self.ensure_tx_executable(&tx, &tx_metrics, false)
-                .map_err(|err| {
-                    let err_message = match err {
-                        SubmitTxError::Unexecutable(err_message) => err_message,
-                        _ => unreachable!(),
-                    };

-                    SandboxExecutionError::Unexecutable(err_message)
-                })?;
-
-            exec_result
-        };
+        // Acquire the vm token for the whole duration of the binary search.
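+        // Holding one permit across all probes bounds the number of concurrently
+        // running sandboxed VMs without re-queueing behind other API calls on
+        // every iteration of the search.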
+        let vm_permit = self.0.vm_concurrency_limiter.acquire().await;

         let mut number_of_iterations = 0usize;
         while lower_bound + acceptable_overestimation < upper_bound {
             let mid = (lower_bound + upper_bound) / 2;
@@ -691,7 +709,19 @@ impl TxSender {
             // or normal execution errors, so we just hope that increasing the
             // gas limit will make the transaction successful
             let iteration_started_at = Instant::now();
-            if execute(gas_for_bytecodes_pubdata + mid).is_err() {
+            let try_gas_limit = gas_for_bytecodes_pubdata + mid;
+            let result = self
+                .estimate_gas_step(
+                    &vm_permit,
+                    tx.clone(),
+                    gas_per_pubdata_byte,
+                    try_gas_limit,
+                    l1_gas_price,
+                    base_fee,
+                    &mut storage_read_cache,
+                )
+                .await;
+            if result.is_err() {
                 lower_bound = mid + 1;
             } else {
                 upper_bound = mid;
@@ -712,16 +742,30 @@ impl TxSender {
             number_of_iterations as f64
         );

-        let tx_body_gas_limit = std::cmp::min(
+        let tx_body_gas_limit = cmp::min(
             MAX_L2_TX_GAS_LIMIT as u32,
             ((upper_bound as f64) * estimated_fee_scale_factor) as u32,
         );

-        match execute(tx_body_gas_limit + gas_for_bytecodes_pubdata) {
+        let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata;
+        let result = self
+            .estimate_gas_step(
+                &vm_permit,
+                tx.clone(),
+                gas_per_pubdata_byte,
+                suggested_gas_limit,
+                l1_gas_price,
+                base_fee,
+                &mut storage_read_cache,
+            )
+            .await;
+
+        drop(vm_permit); // Unblock other VMs to enter.
+
+        match result {
             Err(err) => Err(err.into()),
             Ok(_) => {
                 let overhead = derive_overhead(
-                    tx_body_gas_limit + gas_for_bytecodes_pubdata,
+                    suggested_gas_limit,
                     gas_per_pubdata_byte as u32,
                     tx.encoding_len(),
                     OverheadCoeficients::from_tx_type(tx.tx_format() as u8),
@@ -729,13 +773,13 @@ impl TxSender {
                 let full_gas_limit =
                     match tx_body_gas_limit.overflowing_add(gas_for_bytecodes_pubdata + overhead) {
+                    (value, false) => value,
                     (_, true) => {
                         return Err(SubmitTxError::ExecutionReverted(
                             "exceeds block gas limit".to_string(),
                             vec![],
-                        ))
+                        ));
                     }
-                    (x, _) => x,
                 };

                 Ok(Fee {
@@ -748,19 +792,48 @@ impl TxSender {
         }
     }

+    pub(super) async fn eth_call(
+        &self,
+        block_args: BlockArgs,
+        tx: L2Tx,
+    ) -> Result<Vec<u8>, SubmitTxError> {
+        let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
+        let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit;
+        let result = execute_tx_eth_call(
+            &vm_permit,
+            self.shared_args(),
+            self.0.replica_connection_pool.clone(),
+            tx,
+            block_args,
+            vm_execution_cache_misses_limit,
+            false,
+        )
+        .await?;
+        drop(vm_permit); // Unblock other VMs to enter.
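+        // The permit only guards the VM execution itself; assembling the return
+        // data below happens outside the VM and doesn't need it.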
+ + Ok(match result.revert_reason { + Some(result) => result.original_data, + None => result + .return_data + .into_iter() + .flat_map(<[u8; 32]>::from) + .collect(), + }) + } + pub fn gas_price(&self) -> u64 { let gas_price = self.0.l1_gas_price_source.estimate_effective_gas_price(); - - derive_base_fee_and_gas_per_pubdata( - (gas_price as f64 * self.0.sender_config.gas_price_scale_factor).round() as u64, + let l1_gas_price = (gas_price as f64 * self.0.sender_config.gas_price_scale_factor).round(); + let (base_fee, _) = derive_base_fee_and_gas_per_pubdata( + l1_gas_price as u64, self.0.sender_config.fair_l2_gas_price, - ) - .0 + ); + base_fee } fn ensure_tx_executable( &self, - transaction: &Transaction, + transaction: Transaction, tx_metrics: &TransactionExecutionMetrics, log_message: bool, ) -> Result<(), SubmitTxError> { @@ -771,58 +844,23 @@ impl TxSender { return Ok(()); }; - let execution_metrics = ExecutionMetrics { - published_bytecode_bytes: tx_metrics.published_bytecode_bytes, - l2_l1_long_messages: tx_metrics.l2_l1_long_messages, - l2_l1_logs: tx_metrics.l2_l1_logs, - contracts_deployed: tx_metrics.contracts_deployed, - contracts_used: tx_metrics.contracts_used, - gas_used: tx_metrics.gas_used, - storage_logs: tx_metrics.storage_logs, - vm_events: tx_metrics.vm_events, - total_log_queries: tx_metrics.total_log_queries, - cycles_used: tx_metrics.cycles_used, - computational_gas_used: tx_metrics.computational_gas_used, - }; - let writes_metrics = DeduplicatedWritesMetrics { - initial_storage_writes: tx_metrics.initial_storage_writes, - repeated_storage_writes: tx_metrics.repeated_storage_writes, + // Hash is not computable for the provided `transaction` during gas estimation (it doesn't have + // its input data set). Since we don't log a hash in this case anyway, we just use a dummy value. 
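+        // (`H256::zero()` below is that dummy value.)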
+ let tx_hash = if log_message { + transaction.hash() + } else { + H256::zero() }; - // In api server it's ok to expect that all writes are initial it's safer - let tx_gas_count = gas_count_from_tx_and_metrics(&transaction.clone(), &execution_metrics) - + gas_count_from_writes(&writes_metrics); - let tx_data: TransactionData = transaction.clone().into(); - let tx_encoding_size = tx_data.into_tokens().len(); - - for sealer in &ConditionalSealer::get_default_sealers() { - let seal_resolution = sealer.should_seal( - sk_config, - 0u128, - 1, - execution_metrics, - execution_metrics, - tx_gas_count, - tx_gas_count, - tx_encoding_size, - tx_encoding_size, - writes_metrics, - writes_metrics, + let seal_data = SealData::for_transaction(transaction, tx_metrics); + if let Some(reason) = ConditionalSealer::find_unexecutable_reason(sk_config, &seal_data) { + let message = format!( + "Tx is Unexecutable because of {reason}; inputs for decision: {seal_data:?}" ); - if matches!(seal_resolution, SealResolution::Unexecutable(_)) { - let message = format!( - "Tx is Unexecutable because of {} with execution values {:?} and gas {:?}", - sealer.prom_criterion_name(), - execution_metrics, - tx_gas_count - ); - - if log_message { - vlog::info!("{:#?} {}", transaction.hash(), message); - } - - return Err(SubmitTxError::Unexecutable(message)); + if log_message { + vlog::info!("{tx_hash:#?} {message}"); } + return Err(SubmitTxError::Unexecutable(message)); } Ok(()) } diff --git a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs index c53f17fdf989..1f1a4b63993d 100644 --- a/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/bin/zksync_core/src/api_server/tx_sender/proxy.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use std::future::Future; -use std::sync::RwLock; +use tokio::sync::RwLock; use zksync_types::{ api::{BlockId, Transaction, TransactionDetails, TransactionId, TransactionReceipt}, @@ -8,9 +7,9 @@ use zksync_types::{ H256, }; use zksync_web3_decl::{ - jsonrpsee::core::RpcResult, jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, namespaces::{EthNamespaceClient, ZksNamespaceClient}, + RpcResult, }; /// Used by external node to proxy transaction to the main node @@ -18,69 +17,58 @@ use zksync_web3_decl::{ #[derive(Debug)] pub struct TxProxy { tx_cache: RwLock>, - main_node_url: String, + client: HttpClient, } impl TxProxy { - pub fn new(main_node_url: String) -> Self { + pub fn new(main_node_url: &str) -> Self { + let client = HttpClientBuilder::default().build(main_node_url).unwrap(); Self { - main_node_url, + client, tx_cache: RwLock::new(HashMap::new()), } } - pub fn find_tx(&self, tx_hash: H256) -> Option { - self.tx_cache.read().unwrap().get(&tx_hash).cloned() + pub async fn find_tx(&self, tx_hash: H256) -> Option { + self.tx_cache.read().await.get(&tx_hash).cloned() } - pub fn forget_tx(&self, tx_hash: H256) { - self.tx_cache.write().unwrap().remove(&tx_hash); + pub async fn forget_tx(&self, tx_hash: H256) { + self.tx_cache.write().await.remove(&tx_hash); } - pub fn save_tx(&self, tx_hash: H256, tx: L2Tx) { - self.tx_cache.write().unwrap().insert(tx_hash, tx); + pub async fn save_tx(&self, tx_hash: H256, tx: L2Tx) { + self.tx_cache.write().await.insert(tx_hash, tx); } - fn proxy_request(&self, request: R) -> RpcResult - where - T: Send, - F: Send + Future>, - R: 'static + Send + FnOnce(HttpClient) -> F, - { - let main_node_url = self.main_node_url.clone(); - crate::block_on(async move { - // Clients are tied to the 
runtime they are created in, so we have to create it here. - let client = HttpClientBuilder::default().build(&main_node_url).unwrap(); - request(client).await - }) - } - - pub fn submit_tx(&self, tx: &L2Tx) -> RpcResult { - let raw_tx = zksync_types::Bytes(tx.common_data.input_data().expect("raw tx is absent")); + pub async fn submit_tx(&self, tx: &L2Tx) -> RpcResult { + let input_data = tx.common_data.input_data().expect("raw tx is absent"); + let raw_tx = zksync_types::Bytes(input_data.to_vec()); vlog::info!("Proxying tx {}", tx.hash()); - self.proxy_request(|client| async move { client.send_raw_transaction(raw_tx).await }) + self.client.send_raw_transaction(raw_tx).await } - pub fn request_tx(&self, id: TransactionId) -> RpcResult> { - self.proxy_request(move |client| async move { - match id { - TransactionId::Block(BlockId::Hash(block), index) => { - client.get_transaction_by_block_hash_and_index(block, index) - } - TransactionId::Block(BlockId::Number(block), index) => { - client.get_transaction_by_block_number_and_index(block, index) - } - TransactionId::Hash(hash) => client.get_transaction_by_hash(hash), + pub async fn request_tx(&self, id: TransactionId) -> RpcResult> { + match id { + TransactionId::Block(BlockId::Hash(block), index) => { + self.client + .get_transaction_by_block_hash_and_index(block, index) + .await } - .await - }) + TransactionId::Block(BlockId::Number(block), index) => { + self.client + .get_transaction_by_block_number_and_index(block, index) + .await + } + TransactionId::Hash(hash) => self.client.get_transaction_by_hash(hash).await, + } } - pub fn request_tx_details(&self, hash: H256) -> RpcResult> { - self.proxy_request(move |client| async move { client.get_transaction_details(hash).await }) + pub async fn request_tx_details(&self, hash: H256) -> RpcResult> { + self.client.get_transaction_details(hash).await } - pub fn request_tx_receipt(&self, hash: H256) -> RpcResult> { - self.proxy_request(move |client| async move { client.get_transaction_receipt(hash).await }) + pub async fn request_tx_receipt(&self, hash: H256) -> RpcResult> { + self.client.get_transaction_receipt(hash).await } } diff --git a/core/bin/zksync_core/src/api_server/web3/api_health_check.rs b/core/bin/zksync_core/src/api_server/web3/api_health_check.rs new file mode 100644 index 000000000000..08bd73ecd547 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/api_health_check.rs @@ -0,0 +1,26 @@ +use async_trait::async_trait; +use tokio::sync::watch; +use zksync_health_check::{CheckHealth, CheckHealthStatus}; + +/// HealthCheck used to verify if the Api is ready. 
+/// Used in the /health endpoint +#[derive(Clone, Debug)] +pub struct ApiHealthCheck { + receiver: watch::Receiver, +} + +impl ApiHealthCheck { + pub(super) fn new(receiver: watch::Receiver) -> ApiHealthCheck { + ApiHealthCheck { receiver } + } +} + +#[async_trait] +impl CheckHealth for ApiHealthCheck { + async fn check_health(&self) -> CheckHealthStatus { + match *self.receiver.borrow() { + CheckHealthStatus::Ready => CheckHealthStatus::Ready, + CheckHealthStatus::NotReady(ref error) => CheckHealthStatus::NotReady(error.clone()), + } + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs index 16720abaf6dc..2f26728d07c7 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs @@ -12,7 +12,8 @@ pub fn into_jsrpc_error(err: Web3Error) -> Error { | Web3Error::TooManyTopics | Web3Error::FilterNotFound | Web3Error::InvalidFeeParams(_) - | Web3Error::LogsLimitExceeded(_, _, _) => ErrorCode::InvalidParams, + | Web3Error::LogsLimitExceeded(_, _, _) + | Web3Error::InvalidFilterBlockHash => ErrorCode::InvalidParams, Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3.into(), Web3Error::PubSubTimeout => 4.into(), Web3Error::RequestTimeout => 5.into(), diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs index b62e026e4887..9cbe67e6c074 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs @@ -1,59 +1,44 @@ // External uses use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; -use crate::api_server::web3::namespaces::debug::DebugNamespace; -use jsonrpc_core::Result; +use crate::api_server::web3::namespaces::DebugNamespace; +use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use serde::{Deserialize, Serialize}; -use zksync_types::api::{BlockId, BlockNumber, DebugCall, ResultDebugCall}; -use zksync_types::transaction_request::CallRequest; -use zksync_types::H256; -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum SupportedTracers { - CallTracer, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct CallTracerConfig { - pub only_top_call: bool, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TracerConfig { - pub tracer: SupportedTracers, - pub tracer_config: CallTracerConfig, -} +use zksync_types::{ + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig, H256}, + transaction_request::CallRequest, +}; #[rpc] pub trait DebugNamespaceT { - #[rpc(name = "debug_traceBlockByNumber", returns = "Vec")] + #[rpc(name = "debug_traceBlockByNumber")] fn trace_block_by_number( &self, block: BlockNumber, options: Option, - ) -> Result>; - #[rpc(name = "debug_traceBlockByHash", returns = "Vec")] + ) -> BoxFuture>>; + + #[rpc(name = "debug_traceBlockByHash")] fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> Result>; - #[rpc(name = "debug_traceCall", returns = "DebugCall")] + ) -> BoxFuture>>; + + #[rpc(name = "debug_traceCall")] fn trace_call( &self, request: CallRequest, block: Option, options: Option, - ) -> Result; - #[rpc(name = "debug_traceTransaction", returns = "DebugCall")] + 
) -> BoxFuture>; + + #[rpc(name = "debug_traceTransaction")] fn trace_transaction( &self, tx_hash: H256, options: Option, - ) -> Result>; + ) -> BoxFuture>>; } impl DebugNamespaceT for DebugNamespace { @@ -61,18 +46,28 @@ impl DebugNamespaceT for DebugNamespace { &self, block: BlockNumber, options: Option, - ) -> Result> { - self.debug_trace_block_impl(BlockId::Number(block), options) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .debug_trace_block_impl(BlockId::Number(block), options) + .await + .map_err(into_jsrpc_error) + }) } fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> Result> { - self.debug_trace_block_impl(BlockId::Hash(hash), options) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .debug_trace_block_impl(BlockId::Hash(hash), options) + .await + .map_err(into_jsrpc_error) + }) } fn trace_call( @@ -80,16 +75,22 @@ impl DebugNamespaceT for DebugNamespace { request: CallRequest, block: Option, options: Option, - ) -> Result { - self.debug_trace_call_impl(request, block, options) - .map_err(into_jsrpc_error) + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .debug_trace_call_impl(request, block, options) + .await + .map_err(into_jsrpc_error) + }) } fn trace_transaction( &self, tx_hash: H256, options: Option, - ) -> Result> { - Ok(self.debug_trace_transaction_impl(tx_hash, options)) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.debug_trace_transaction_impl(tx_hash, options).await) }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs new file mode 100644 index 000000000000..e75d7caade29 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs @@ -0,0 +1,40 @@ +// Built-in uses + +// External uses +use jsonrpc_core::{BoxFuture, Result}; +use jsonrpc_derive::rpc; + +// Workspace uses +use zksync_types::{api::en::SyncBlock, MiniblockNumber}; + +// Local uses +use crate::{ + api_server::web3::{backend_jsonrpc::error::into_jsrpc_error, EnNamespace}, + l1_gas_price::L1GasPriceProvider, +}; + +#[rpc] +pub trait EnNamespaceT { + #[rpc(name = "en_syncL2Block")] + fn sync_l2_block( + &self, + block_number: MiniblockNumber, + include_transactions: bool, + ) -> BoxFuture>>; +} + +impl EnNamespaceT for EnNamespace { + fn sync_l2_block( + &self, + block_number: MiniblockNumber, + include_transactions: bool, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .sync_l2_block_impl(block_number, include_transactions) + .await + .map_err(into_jsrpc_error) + }) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs index 2555816b384a..db2190919c5f 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs @@ -1,7 +1,7 @@ // Built-in uses // External uses -use jsonrpc_core::Result; +use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; // Workspace uses @@ -23,252 +23,338 @@ use crate::{l1_gas_price::L1GasPriceProvider, web3::backend_jsonrpc::error::into #[rpc] pub trait EthNamespaceT { - #[rpc(name = "eth_blockNumber", returns = "U64")] - fn get_block_number(&self) -> Result; + 
#[rpc(name = "eth_blockNumber")] + fn get_block_number(&self) -> BoxFuture>; - #[rpc(name = "eth_chainId", returns = "U64")] - fn chain_id(&self) -> Result; + #[rpc(name = "eth_chainId")] + fn chain_id(&self) -> BoxFuture>; - #[rpc(name = "eth_call", returns = "Bytes")] - fn call(&self, req: CallRequest, block: Option) -> Result; + #[rpc(name = "eth_call")] + fn call(&self, req: CallRequest, block: Option) -> BoxFuture>; - #[rpc(name = "eth_estimateGas", returns = "U256")] - fn estimate_gas(&self, req: CallRequest, _block: Option) -> Result; + #[rpc(name = "eth_estimateGas")] + fn estimate_gas( + &self, + req: CallRequest, + _block: Option, + ) -> BoxFuture>; - #[rpc(name = "eth_gasPrice", returns = "U256")] - fn gas_price(&self) -> Result; + #[rpc(name = "eth_gasPrice")] + fn gas_price(&self) -> BoxFuture>; - #[rpc(name = "eth_newFilter", returns = "U256")] - fn new_filter(&self, filter: Filter) -> Result; + #[rpc(name = "eth_newFilter")] + fn new_filter(&self, filter: Filter) -> BoxFuture>; - #[rpc(name = "eth_newBlockFilter", returns = "U256")] - fn new_block_filter(&self) -> Result; + #[rpc(name = "eth_newBlockFilter")] + fn new_block_filter(&self) -> BoxFuture>; - #[rpc(name = "eth_uninstallFilter", returns = "U256")] - fn uninstall_filter(&self, idx: U256) -> Result; + #[rpc(name = "eth_uninstallFilter")] + fn uninstall_filter(&self, idx: U256) -> BoxFuture>; - #[rpc(name = "eth_newPendingTransactionFilter", returns = "U256")] - fn new_pending_transaction_filter(&self) -> Result; + #[rpc(name = "eth_newPendingTransactionFilter")] + fn new_pending_transaction_filter(&self) -> BoxFuture>; - #[rpc(name = "eth_getLogs", returns = "Vec")] - fn get_logs(&self, filter: Filter) -> Result>; + #[rpc(name = "eth_getLogs")] + fn get_logs(&self, filter: Filter) -> BoxFuture>>; - #[rpc(name = "eth_getFilterLogs", returns = "FilterChanges")] - fn get_filter_logs(&self, filter_index: U256) -> Result; + #[rpc(name = "eth_getFilterLogs")] + fn get_filter_logs(&self, filter_index: U256) -> BoxFuture>; - #[rpc(name = "eth_getFilterChanges", returns = "FilterChanges")] - fn get_filter_changes(&self, filter_index: U256) -> Result; + #[rpc(name = "eth_getFilterChanges")] + fn get_filter_changes(&self, filter_index: U256) -> BoxFuture>; - #[rpc(name = "eth_getBalance", returns = "U256")] - fn get_balance(&self, address: Address, block: Option) -> Result; + #[rpc(name = "eth_getBalance")] + fn get_balance( + &self, + address: Address, + block: Option, + ) -> BoxFuture>; - #[rpc( - name = "eth_getBlockByNumber", - returns = "Option>" - )] + #[rpc(name = "eth_getBlockByNumber")] fn get_block_by_number( &self, block_number: BlockNumber, full_transactions: bool, - ) -> Result>>; + ) -> BoxFuture>>>; - #[rpc( - name = "eth_getBlockByHash", - returns = "Option>" - )] + #[rpc(name = "eth_getBlockByHash")] fn get_block_by_hash( &self, hash: H256, full_transactions: bool, - ) -> Result>>; + ) -> BoxFuture>>>; - #[rpc( - name = "eth_getBlockTransactionCountByNumber", - returns = "Option" - )] + #[rpc(name = "eth_getBlockTransactionCountByNumber")] fn get_block_transaction_count_by_number( &self, block_number: BlockNumber, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc(name = "eth_getBlockTransactionCountByHash", returns = "Option")] - fn get_block_transaction_count_by_hash(&self, block_hash: H256) -> Result>; + #[rpc(name = "eth_getBlockTransactionCountByHash")] + fn get_block_transaction_count_by_hash( + &self, + block_hash: H256, + ) -> BoxFuture>>; - #[rpc(name = "eth_getCode", returns = "Bytes")] - fn get_code(&self, 
address: Address, block: Option) -> Result; + #[rpc(name = "eth_getCode")] + fn get_code(&self, address: Address, block: Option) + -> BoxFuture>; - #[rpc(name = "eth_getStorageAt", returns = "H256")] + #[rpc(name = "eth_getStorageAt")] fn get_storage( &self, address: Address, idx: U256, block: Option, - ) -> Result; + ) -> BoxFuture>; - #[rpc(name = "eth_getTransactionCount", returns = "U256")] + #[rpc(name = "eth_getTransactionCount")] fn get_transaction_count( &self, address: Address, block: Option, - ) -> Result; + ) -> BoxFuture>; - #[rpc(name = "eth_getTransactionByHash", returns = "Option")] - fn get_transaction_by_hash(&self, hash: H256) -> Result>; + #[rpc(name = "eth_getTransactionByHash")] + fn get_transaction_by_hash(&self, hash: H256) -> BoxFuture>>; - #[rpc( - name = "eth_getTransactionByBlockHashAndIndex", - returns = "Option" - )] + #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] fn get_transaction_by_block_hash_and_index( &self, block_hash: H256, index: Index, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc( - name = "eth_getTransactionByBlockNumberAndIndex", - returns = "Option" - )] + #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] fn get_transaction_by_block_number_and_index( &self, block_number: BlockNumber, index: Index, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc( - name = "eth_getTransactionReceipt", - returns = "Option" - )] - fn get_transaction_receipt(&self, hash: H256) -> Result>; + #[rpc(name = "eth_getTransactionReceipt")] + fn get_transaction_receipt(&self, hash: H256) -> BoxFuture>>; - #[rpc(name = "eth_protocolVersion", returns = "String")] - fn protocol_version(&self) -> Result; + #[rpc(name = "eth_protocolVersion")] + fn protocol_version(&self) -> BoxFuture>; - #[rpc(name = "eth_sendRawTransaction", returns = "H256")] - fn send_raw_transaction(&self, tx_bytes: Bytes) -> Result; + #[rpc(name = "eth_sendRawTransaction")] + fn send_raw_transaction(&self, tx_bytes: Bytes) -> BoxFuture>; - #[rpc(name = "eth_syncing", returns = "SyncState")] - fn syncing(&self) -> Result; + #[rpc(name = "eth_syncing")] + fn syncing(&self) -> BoxFuture>; - #[rpc(name = "eth_accounts", returns = "Vec
")] - fn accounts(&self) -> Result>; + #[rpc(name = "eth_accounts")] + fn accounts(&self) -> BoxFuture>>; - #[rpc(name = "eth_coinbase", returns = "Address")] - fn coinbase(&self) -> Result
; + #[rpc(name = "eth_coinbase")] + fn coinbase(&self) -> BoxFuture>; - #[rpc(name = "eth_getCompilers", returns = "Vec")] - fn compilers(&self) -> Result>; + #[rpc(name = "eth_getCompilers")] + fn compilers(&self) -> BoxFuture>>; - #[rpc(name = "eth_hashrate", returns = "U256")] - fn hashrate(&self) -> Result; + #[rpc(name = "eth_hashrate")] + fn hashrate(&self) -> BoxFuture>; - #[rpc(name = "eth_getUncleCountByBlockHash", returns = "Option")] - fn get_uncle_count_by_block_hash(&self, hash: H256) -> Result>; + #[rpc(name = "eth_getUncleCountByBlockHash")] + fn get_uncle_count_by_block_hash(&self, hash: H256) -> BoxFuture>>; - #[rpc(name = "eth_getUncleCountByBlockNumber", returns = "Option")] - fn get_uncle_count_by_block_number(&self, number: BlockNumber) -> Result>; + #[rpc(name = "eth_getUncleCountByBlockNumber")] + fn get_uncle_count_by_block_number( + &self, + number: BlockNumber, + ) -> BoxFuture>>; - #[rpc(name = "eth_mining", returns = "bool")] - fn mining(&self) -> Result; + #[rpc(name = "eth_mining")] + fn mining(&self) -> BoxFuture>; - #[rpc(name = "eth_sendTransaction", returns = "H256")] + #[rpc(name = "eth_sendTransaction")] fn send_transaction( &self, transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> Result; + ) -> BoxFuture>; } impl EthNamespaceT for EthNamespace { - fn get_block_number(&self) -> Result { - self.get_block_number_impl().map_err(into_jsrpc_error) + fn get_block_number(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_number_impl() + .await + .map_err(into_jsrpc_error) + }) } - fn chain_id(&self) -> Result { - Ok(self.chain_id_impl()) + fn chain_id(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.chain_id_impl()) }) } - fn call(&self, req: CallRequest, block: Option) -> Result { - self.call_impl(req, block.map(Into::into)) - .map_err(into_jsrpc_error) + fn call(&self, req: CallRequest, block: Option) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .call_impl(req, block.map(Into::into)) + .await + .map_err(into_jsrpc_error) + }) } - fn estimate_gas(&self, req: CallRequest, block: Option) -> Result { - self.estimate_gas_impl(req, block).map_err(into_jsrpc_error) + fn estimate_gas( + &self, + req: CallRequest, + block: Option, + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .estimate_gas_impl(req, block) + .await + .map_err(into_jsrpc_error) + }) } - fn gas_price(&self) -> Result { - self.gas_price_impl().map_err(into_jsrpc_error) + fn gas_price(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { self_.gas_price_impl().map_err(into_jsrpc_error) }) } - fn new_filter(&self, filter: Filter) -> Result { - self.new_filter_impl(filter).map_err(into_jsrpc_error) + fn new_filter(&self, filter: Filter) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .new_filter_impl(filter) + .await + .map_err(into_jsrpc_error) + }) } - fn new_block_filter(&self) -> Result { - self.new_block_filter_impl().map_err(into_jsrpc_error) + fn new_block_filter(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .new_block_filter_impl() + .await + .map_err(into_jsrpc_error) + }) } - fn uninstall_filter(&self, idx: U256) -> Result { - Ok(self.uninstall_filter_impl(idx)) + fn uninstall_filter(&self, idx: U256) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.uninstall_filter_impl(idx).await) }) } - fn 
new_pending_transaction_filter(&self) -> Result { - Ok(self.new_pending_transaction_filter_impl()) + fn new_pending_transaction_filter(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.new_pending_transaction_filter_impl().await) }) } - fn get_logs(&self, filter: Filter) -> Result> { - self.get_logs_impl(filter).map_err(into_jsrpc_error) + fn get_logs(&self, filter: Filter) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { self_.get_logs_impl(filter).await.map_err(into_jsrpc_error) }) } - fn get_filter_logs(&self, filter_index: U256) -> Result { - self.get_filter_logs_impl(filter_index) - .map_err(into_jsrpc_error) + fn get_filter_logs(&self, filter_index: U256) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_filter_logs_impl(filter_index) + .await + .map_err(into_jsrpc_error) + }) } - fn get_filter_changes(&self, filter_index: U256) -> Result { - self.get_filter_changes_impl(filter_index) - .map_err(into_jsrpc_error) + fn get_filter_changes(&self, filter_index: U256) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_filter_changes_impl(filter_index) + .await + .map_err(into_jsrpc_error) + }) } - fn get_balance(&self, address: Address, block: Option) -> Result { - self.get_balance_impl(address, block.map(Into::into)) - .map_err(into_jsrpc_error) + fn get_balance( + &self, + address: Address, + block: Option, + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_balance_impl(address, block.map(Into::into)) + .await + .map_err(into_jsrpc_error) + }) } fn get_block_by_number( &self, block_number: BlockNumber, full_transactions: bool, - ) -> Result>> { - self.get_block_impl(BlockId::Number(block_number), full_transactions) - .map_err(into_jsrpc_error) + ) -> BoxFuture>>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_impl(BlockId::Number(block_number), full_transactions) + .await + .map_err(into_jsrpc_error) + }) } fn get_block_by_hash( &self, hash: H256, full_transactions: bool, - ) -> Result>> { - self.get_block_impl(BlockId::Hash(hash), full_transactions) - .map_err(into_jsrpc_error) + ) -> BoxFuture>>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_impl(BlockId::Hash(hash), full_transactions) + .await + .map_err(into_jsrpc_error) + }) } fn get_block_transaction_count_by_number( &self, block_number: BlockNumber, - ) -> Result> { - self.get_block_transaction_count_impl(BlockId::Number(block_number)) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_transaction_count_impl(BlockId::Number(block_number)) + .await + .map_err(into_jsrpc_error) + }) } - fn get_block_transaction_count_by_hash(&self, block_hash: H256) -> Result> { - self.get_block_transaction_count_impl(BlockId::Hash(block_hash)) - .map_err(into_jsrpc_error) + fn get_block_transaction_count_by_hash( + &self, + block_hash: H256, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_transaction_count_impl(BlockId::Hash(block_hash)) + .await + .map_err(into_jsrpc_error) + }) } - fn get_code(&self, address: Address, block: Option) -> Result { - self.get_code_impl(address, block.map(Into::into)) - .map_err(into_jsrpc_error) + fn get_code( + &self, + address: Address, + block: Option, + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_code_impl(address, block.map(Into::into)) + .await + 
.map_err(into_jsrpc_error) + }) } fn get_storage( @@ -276,99 +362,151 @@ impl EthNamespaceT for EthNamespa address: Address, idx: U256, block: Option, - ) -> Result { - self.get_storage_at_impl(address, idx, block.map(Into::into)) - .map_err(into_jsrpc_error) + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_storage_at_impl(address, idx, block.map(Into::into)) + .await + .map_err(into_jsrpc_error) + }) } fn get_transaction_count( &self, address: Address, block: Option, - ) -> Result { - self.get_transaction_count_impl(address, block.map(Into::into)) - .map_err(into_jsrpc_error) - } - - fn get_transaction_by_hash(&self, hash: H256) -> Result> { - self.get_transaction_impl(TransactionId::Hash(hash)) - .map_err(into_jsrpc_error) + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_count_impl(address, block.map(Into::into)) + .await + .map_err(into_jsrpc_error) + }) + } + + fn get_transaction_by_hash(&self, hash: H256) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_impl(TransactionId::Hash(hash)) + .await + .map_err(into_jsrpc_error) + }) } fn get_transaction_by_block_hash_and_index( &self, block_hash: H256, index: Index, - ) -> Result> { - self.get_transaction_impl(TransactionId::Block(BlockId::Hash(block_hash), index)) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_impl(TransactionId::Block(BlockId::Hash(block_hash), index)) + .await + .map_err(into_jsrpc_error) + }) } fn get_transaction_by_block_number_and_index( &self, block_number: BlockNumber, index: Index, - ) -> Result> { - self.get_transaction_impl(TransactionId::Block(BlockId::Number(block_number), index)) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_impl(TransactionId::Block(BlockId::Number(block_number), index)) + .await + .map_err(into_jsrpc_error) + }) } - fn get_transaction_receipt(&self, hash: H256) -> Result> { - self.get_transaction_receipt_impl(hash) - .map_err(into_jsrpc_error) + fn get_transaction_receipt(&self, hash: H256) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_receipt_impl(hash) + .await + .map_err(into_jsrpc_error) + }) } - fn protocol_version(&self) -> Result { - Ok(self.protocol_version()) + fn protocol_version(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.protocol_version()) }) } - fn send_raw_transaction(&self, tx_bytes: Bytes) -> Result { - self.send_raw_transaction_impl(tx_bytes) - .map_err(into_jsrpc_error) + fn send_raw_transaction(&self, tx_bytes: Bytes) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .send_raw_transaction_impl(tx_bytes) + .await + .map_err(into_jsrpc_error) + }) } - fn syncing(&self) -> Result { - Ok(self.syncing_impl()) + fn syncing(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.syncing_impl()) }) } - fn accounts(&self) -> Result> { - Ok(self.accounts_impl()) + fn accounts(&self) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.accounts_impl()) }) } - fn coinbase(&self) -> Result
{ - Ok(self.coinbase_impl()) + fn coinbase(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.coinbase_impl()) }) } - fn compilers(&self) -> Result> { - Ok(self.compilers_impl()) + fn compilers(&self) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.compilers_impl()) }) } - fn hashrate(&self) -> Result { - Ok(self.hashrate_impl()) + fn hashrate(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.hashrate_impl()) }) } - fn get_uncle_count_by_block_hash(&self, hash: H256) -> Result> { - Ok(self.uncle_count_impl(BlockId::Hash(hash))) + fn get_uncle_count_by_block_hash(&self, hash: H256) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.uncle_count_impl(BlockId::Hash(hash))) }) } - fn get_uncle_count_by_block_number(&self, number: BlockNumber) -> Result> { - Ok(self.uncle_count_impl(BlockId::Number(number))) + fn get_uncle_count_by_block_number( + &self, + number: BlockNumber, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.uncle_count_impl(BlockId::Number(number))) }) } - fn mining(&self) -> Result { - Ok(self.mining_impl()) + fn mining(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.mining_impl()) }) } fn send_transaction( &self, _transaction_request: zksync_types::web3::types::TransactionRequest, - ) -> Result { + ) -> BoxFuture> { #[cfg(feature = "openzeppelin_tests")] - return self - .send_transaction_impl(_transaction_request) - .map_err(into_jsrpc_error); - - #[cfg(not(feature = "openzeppelin_tests"))] - Err(into_jsrpc_error(Web3Error::NotImplemented)) + let self_ = self.clone(); + Box::pin(async move { + #[cfg(feature = "openzeppelin_tests")] + return self_ + .send_transaction_impl(_transaction_request) + .await + .map_err(into_jsrpc_error); + + #[cfg(not(feature = "openzeppelin_tests"))] + Err(into_jsrpc_error(Web3Error::NotImplemented)) + }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs index 32503ab00fc7..8fbd3919c26c 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs @@ -1,4 +1,5 @@ pub mod debug; +pub mod en; pub mod eth; pub mod net; pub mod web3; diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index 1d37f765622a..fe0dc9a9d60b 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; // External uses use bigdecimal::BigDecimal; -use jsonrpc_core::Result; +use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; // Workspace uses @@ -12,7 +12,6 @@ use zksync_types::{ explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, transaction_request::CallRequest, - vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, Address, Bytes, L1BatchNumber, MiniblockNumber, H256, U256, U64, }; use zksync_web3_decl::error::Web3Error; @@ -24,162 +23,166 @@ use crate::{l1_gas_price::L1GasPriceProvider, web3::backend_jsonrpc::error::into #[rpc] pub trait ZksNamespaceT { - #[rpc(name = "zks_estimateFee", returns = "Fee")] - fn estimate_fee(&self, req: CallRequest) -> Result; + #[rpc(name = 
"zks_estimateFee")] + fn estimate_fee(&self, req: CallRequest) -> BoxFuture>; - #[rpc(name = "zks_estimateGasL1ToL2", returns = "U256")] - fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> Result; + #[rpc(name = "zks_estimateGasL1ToL2")] + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> BoxFuture>; - #[rpc(name = "zks_getMainContract", returns = "Address")] - fn get_main_contract(&self) -> Result
; + #[rpc(name = "zks_getMainContract")] + fn get_main_contract(&self) -> BoxFuture>; - #[rpc(name = "zks_getTestnetPaymaster", returns = "Option
")] - fn get_testnet_paymaster(&self) -> Result>; + #[rpc(name = "zks_getTestnetPaymaster")] + fn get_testnet_paymaster(&self) -> BoxFuture>>; - #[rpc(name = "zks_getBridgeContracts", returns = "BridgeAddresses")] - fn get_bridge_contracts(&self) -> Result; + #[rpc(name = "zks_getBridgeContracts")] + fn get_bridge_contracts(&self) -> BoxFuture>; - #[rpc(name = "zks_L1ChainId", returns = "U64")] - fn l1_chain_id(&self) -> Result; + #[rpc(name = "zks_L1ChainId")] + fn l1_chain_id(&self) -> BoxFuture>; - #[rpc(name = "zks_getConfirmedTokens", returns = "Vec")] - fn get_confirmed_tokens(&self, from: u32, limit: u8) -> Result>; + #[rpc(name = "zks_getConfirmedTokens")] + fn get_confirmed_tokens(&self, from: u32, limit: u8) -> BoxFuture>>; - #[rpc(name = "zks_getTokenPrice", returns = "BigDecimal")] - fn get_token_price(&self, token_address: Address) -> Result; + #[rpc(name = "zks_getTokenPrice")] + fn get_token_price(&self, token_address: Address) -> BoxFuture>; - #[rpc(name = "zks_setContractDebugInfo", returns = "bool")] - fn set_contract_debug_info( + #[rpc(name = "zks_getAllAccountBalances")] + fn get_all_account_balances( &self, - contract_address: Address, - info: ContractSourceDebugInfo, - ) -> Result; - - #[rpc(name = "zks_getContractDebugInfo", returns = "ContractSourceDebugInfo")] - fn get_contract_debug_info( - &self, - contract_address: Address, - ) -> Result>; - - #[rpc(name = "zks_getTransactionTrace", returns = "Option")] - fn get_transaction_trace(&self, hash: H256) -> Result>; - - #[rpc(name = "zks_getAllAccountBalances", returns = "HashMap")] - fn get_all_account_balances(&self, address: Address) -> Result>; + address: Address, + ) -> BoxFuture>>; - #[rpc(name = "zks_getL2ToL1MsgProof", returns = "Option>")] + #[rpc(name = "zks_getL2ToL1MsgProof")] fn get_l2_to_l1_msg_proof( &self, block: MiniblockNumber, sender: Address, msg: H256, l2_log_position: Option, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc(name = "zks_getL2ToL1LogProof", returns = "Option>")] + #[rpc(name = "zks_getL2ToL1LogProof")] fn get_l2_to_l1_log_proof( &self, tx_hash: H256, index: Option, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc(name = "zks_L1BatchNumber", returns = "U64")] - fn get_l1_batch_number(&self) -> Result; + #[rpc(name = "zks_L1BatchNumber")] + fn get_l1_batch_number(&self) -> BoxFuture>; - #[rpc(name = "zks_getBlockDetails", returns = "Option")] - fn get_block_details(&self, block_number: MiniblockNumber) -> Result>; + #[rpc(name = "zks_getBlockDetails")] + fn get_block_details( + &self, + block_number: MiniblockNumber, + ) -> BoxFuture>>; - #[rpc(name = "zks_getL1BatchBlockRange", returns = "Option<(U64, U64)>")] - fn get_miniblock_range(&self, batch: L1BatchNumber) -> Result>; + #[rpc(name = "zks_getL1BatchBlockRange")] + fn get_miniblock_range(&self, batch: L1BatchNumber) -> BoxFuture>>; - #[rpc(name = "zks_setKnownBytecode", returns = "bool")] - fn set_known_bytecode(&self, bytecode: Bytes) -> Result; + #[rpc(name = "zks_setKnownBytecode")] + fn set_known_bytecode(&self, bytecode: Bytes) -> BoxFuture>; - #[rpc( - name = "zks_getTransactionDetails", - returns = "Option" - )] - fn get_transaction_details(&self, hash: H256) -> Result>; + #[rpc(name = "zks_getTransactionDetails")] + fn get_transaction_details(&self, hash: H256) -> BoxFuture>>; - #[rpc( - name = "zks_getRawBlockTransactions", - returns = "Vec" - )] + #[rpc(name = "zks_getRawBlockTransactions")] fn get_raw_block_transactions( &self, block_number: MiniblockNumber, - ) -> Result>; + ) -> BoxFuture>>; - #[rpc(name = 
"zks_getL1BatchDetails", returns = "Option")] - fn get_l1_batch_details(&self, batch: L1BatchNumber) -> Result>; + #[rpc(name = "zks_getL1BatchDetails")] + fn get_l1_batch_details( + &self, + batch: L1BatchNumber, + ) -> BoxFuture>>; - #[rpc(name = "zks_getBytecodeByHash", returns = "Option>")] - fn get_bytecode_by_hash(&self, hash: H256) -> Result>>; + #[rpc(name = "zks_getBytecodeByHash")] + fn get_bytecode_by_hash(&self, hash: H256) -> BoxFuture>>>; - #[rpc(name = "zks_getL1GasPrice", returns = "U64")] - fn get_l1_gas_price(&self) -> Result; + #[rpc(name = "zks_getL1GasPrice")] + fn get_l1_gas_price(&self) -> BoxFuture>; } impl ZksNamespaceT for ZksNamespace { - fn estimate_fee(&self, req: CallRequest) -> Result { - self.estimate_fee_impl(req).map_err(into_jsrpc_error) + fn estimate_fee(&self, req: CallRequest) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { self_.estimate_fee_impl(req).await.map_err(into_jsrpc_error) }) } - fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> Result { - self.estimate_l1_to_l2_gas_impl(req) - .map_err(into_jsrpc_error) + fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .estimate_l1_to_l2_gas_impl(req) + .await + .map_err(into_jsrpc_error) + }) } - fn get_main_contract(&self) -> Result
{ - Ok(self.get_main_contract_impl()) + fn get_main_contract(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_main_contract_impl()) }) } - fn get_miniblock_range(&self, batch: L1BatchNumber) -> Result> { - self.get_miniblock_range_impl(batch) - .map_err(into_jsrpc_error) + fn get_miniblock_range(&self, batch: L1BatchNumber) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_miniblock_range_impl(batch) + .await + .map_err(into_jsrpc_error) + }) } - fn get_testnet_paymaster(&self) -> Result> { - Ok(self.get_testnet_paymaster_impl()) + fn get_testnet_paymaster(&self) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_testnet_paymaster_impl()) }) } - fn get_bridge_contracts(&self) -> Result { - Ok(self.get_bridge_contracts_impl()) + fn get_bridge_contracts(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_bridge_contracts_impl()) }) } - fn l1_chain_id(&self) -> Result { - Ok(self.l1_chain_id_impl()) + fn l1_chain_id(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.l1_chain_id_impl()) }) } - fn get_confirmed_tokens(&self, from: u32, limit: u8) -> Result> { - self.get_confirmed_tokens_impl(from, limit) - .map_err(into_jsrpc_error) + fn get_confirmed_tokens(&self, from: u32, limit: u8) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_confirmed_tokens_impl(from, limit) + .await + .map_err(into_jsrpc_error) + }) } - fn get_token_price(&self, token_address: Address) -> Result { - self.get_token_price_impl(token_address) - .map_err(into_jsrpc_error) + fn get_token_price(&self, token_address: Address) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_token_price_impl(token_address) + .await + .map_err(into_jsrpc_error) + }) } - fn set_contract_debug_info( + fn get_all_account_balances( &self, address: Address, - info: ContractSourceDebugInfo, - ) -> Result { - Ok(self.set_contract_debug_info_impl(address, info)) - } - - fn get_contract_debug_info(&self, address: Address) -> Result> { - Ok(self.get_contract_debug_info_impl(address)) - } - - fn get_transaction_trace(&self, hash: H256) -> Result> { - Ok(self.get_transaction_trace_impl(hash)) - } - - fn get_all_account_balances(&self, address: Address) -> Result> { - self.get_all_account_balances_impl(address) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_all_account_balances_impl(address) + .await + .map_err(into_jsrpc_error) + }) } fn get_l2_to_l1_msg_proof( @@ -188,60 +191,108 @@ impl ZksNamespaceT for ZksNamespa sender: Address, msg: H256, l2_log_position: Option, - ) -> Result> { - self.get_l2_to_l1_msg_proof_impl(block, sender, msg, l2_log_position) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_l2_to_l1_msg_proof_impl(block, sender, msg, l2_log_position) + .await + .map_err(into_jsrpc_error) + }) } fn get_l2_to_l1_log_proof( &self, tx_hash: H256, index: Option, - ) -> Result> { - self.get_l2_to_l1_log_proof_impl(tx_hash, index) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_l2_to_l1_log_proof_impl(tx_hash, index) + .await + .map_err(into_jsrpc_error) + }) } - fn get_l1_batch_number(&self) -> Result { - self.get_l1_batch_number_impl().map_err(into_jsrpc_error) + fn get_l1_batch_number(&self) -> 
BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_l1_batch_number_impl() + .await + .map_err(into_jsrpc_error) + }) } - fn get_block_details(&self, block_number: MiniblockNumber) -> Result> { - self.get_block_details_impl(block_number) - .map_err(into_jsrpc_error) + fn get_block_details( + &self, + block_number: MiniblockNumber, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_block_details_impl(block_number) + .await + .map_err(into_jsrpc_error) + }) } - fn get_transaction_details(&self, hash: H256) -> Result> { - self.get_transaction_details_impl(hash) - .map_err(into_jsrpc_error) + fn get_transaction_details(&self, hash: H256) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_transaction_details_impl(hash) + .await + .map_err(into_jsrpc_error) + }) } - fn set_known_bytecode(&self, _bytecode: Bytes) -> Result { + fn set_known_bytecode(&self, _bytecode: Bytes) -> BoxFuture> { #[cfg(feature = "openzeppelin_tests")] - return Ok(self.set_known_bytecode_impl(_bytecode)); - - #[cfg(not(feature = "openzeppelin_tests"))] - Err(into_jsrpc_error(Web3Error::NotImplemented)) + let self_ = self.clone(); + Box::pin(async move { + #[cfg(feature = "openzeppelin_tests")] + return Ok(self_.set_known_bytecode_impl(_bytecode)); + + #[cfg(not(feature = "openzeppelin_tests"))] + Err(into_jsrpc_error(Web3Error::NotImplemented)) + }) } fn get_raw_block_transactions( &self, block_number: MiniblockNumber, - ) -> Result> { - self.get_raw_block_transactions_impl(block_number) - .map_err(into_jsrpc_error) + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_raw_block_transactions_impl(block_number) + .await + .map_err(into_jsrpc_error) + }) } - fn get_l1_batch_details(&self, batch: L1BatchNumber) -> Result> { - self.get_l1_batch_details_impl(batch) - .map_err(into_jsrpc_error) + fn get_l1_batch_details( + &self, + batch: L1BatchNumber, + ) -> BoxFuture>> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_l1_batch_details_impl(batch) + .await + .map_err(into_jsrpc_error) + }) } - fn get_bytecode_by_hash(&self, hash: H256) -> Result>> { - Ok(self.get_bytecode_by_hash_impl(hash)) + fn get_bytecode_by_hash(&self, hash: H256) -> BoxFuture>>> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_bytecode_by_hash_impl(hash).await) }) } - fn get_l1_gas_price(&self) -> Result { - Ok(self.get_l1_gas_price_impl()) + fn get_l1_gas_price(&self) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { Ok(self_.get_l1_gas_price_impl()) }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs index 24161a48fee0..2306544cd5dc 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use jsonrpc_core::Result; +use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::typed; use jsonrpc_pubsub::{Session, SubscriptionId}; @@ -31,7 +31,7 @@ pub trait Web3PubSub { &self, meta: Option, subscription: SubscriptionId, - ) -> Result; + ) -> BoxFuture>; } impl Web3PubSub for EthSubscribe { @@ -44,10 +44,18 @@ impl Web3PubSub for EthSubscribe { sub_type: String, params: Option, ) { - self.sub(subscriber, sub_type, params); + let self_ = self.clone(); + // Fire and forget is OK here. 
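+        // `subscribe` has no return value in the `jsonrpc` pub/sub API, so the
+        // async `sub` is spawned onto the runtime instead of being awaited.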
+ self.runtime_handle + .spawn(async move { self_.sub(subscriber, sub_type, params).await }); } - fn unsubscribe(&self, _meta: Option, id: SubscriptionId) -> Result { - self.unsub(id) + fn unsubscribe( + &self, + _meta: Option, + id: SubscriptionId, + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { self_.unsub(id).await }) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs index dcdba9bfdb52..3ead6304d7e0 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs @@ -2,4 +2,40 @@ //! Consists mostly of boilerplate code implementing the `jsonrpsee` server traits for the corresponding //! namespace structures defined in `zksync_core`. +use std::error::Error; +use zksync_web3_decl::error::Web3Error; +use zksync_web3_decl::jsonrpsee::types::{error::ErrorCode, ErrorObjectOwned}; + pub mod namespaces; + +pub fn from_std_error(e: impl Error) -> ErrorObjectOwned { + ErrorObjectOwned::owned(ErrorCode::InternalError.code(), e.to_string(), Some(())) +} + +pub fn into_jsrpc_error(err: Web3Error) -> ErrorObjectOwned { + ErrorObjectOwned::owned( + match err { + Web3Error::InternalError | Web3Error::NotImplemented => ErrorCode::InternalError.code(), + Web3Error::NoBlock + | Web3Error::NoSuchFunction + | Web3Error::RLPError(_) + | Web3Error::InvalidTransactionData(_) + | Web3Error::TooManyTopics + | Web3Error::FilterNotFound + | Web3Error::InvalidFeeParams(_) + | Web3Error::InvalidFilterBlockHash + | Web3Error::LogsLimitExceeded(_, _, _) => ErrorCode::InvalidParams.code(), + Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3, + Web3Error::PubSubTimeout => 4, + Web3Error::RequestTimeout => 5, + }, + match err { + Web3Error::SubmitTransactionError(ref message, _) => message.clone(), + _ => err.to_string(), + }, + match err { + Web3Error::SubmitTransactionError(_, data) => Some(format!("0x{}", hex::encode(data))), + _ => None, + }, + ) +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs new file mode 100644 index 000000000000..0bd61bbbc3d2 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs @@ -0,0 +1,50 @@ +use zksync_types::{ + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, + transaction_request::CallRequest, + H256, +}; +use zksync_web3_decl::{ + jsonrpsee::core::{async_trait, RpcResult}, + namespaces::debug::DebugNamespaceServer, +}; + +use crate::api_server::web3::{backend_jsonrpsee::into_jsrpc_error, namespaces::DebugNamespace}; + +#[async_trait] +impl DebugNamespaceServer for DebugNamespace { + async fn trace_block_by_number( + &self, + block: BlockNumber, + options: Option, + ) -> RpcResult> { + self.debug_trace_block_impl(BlockId::Number(block), options) + .await + .map_err(into_jsrpc_error) + } + async fn trace_block_by_hash( + &self, + hash: H256, + options: Option, + ) -> RpcResult> { + self.debug_trace_block_impl(BlockId::Hash(hash), options) + .await + .map_err(into_jsrpc_error) + } + async fn trace_call( + &self, + request: CallRequest, + block: Option, + options: Option, + ) -> RpcResult { + self.debug_trace_call_impl(request, block, options) + .await + .map_err(into_jsrpc_error) + } + async fn trace_transaction( + &self, + tx_hash: H256, + options: Option, + ) 
-> RpcResult> { + Ok(self.debug_trace_transaction_impl(tx_hash, options).await) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs new file mode 100644 index 000000000000..69dce6f6dae4 --- /dev/null +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/en.rs @@ -0,0 +1,23 @@ +use zksync_types::{api::en::SyncBlock, MiniblockNumber}; +use zksync_web3_decl::{ + jsonrpsee::core::{async_trait, RpcResult}, + namespaces::en::EnNamespaceServer, +}; + +use crate::{ + api_server::web3::{backend_jsonrpsee::into_jsrpc_error, namespaces::EnNamespace}, + l1_gas_price::L1GasPriceProvider, +}; + +#[async_trait] +impl EnNamespaceServer for EnNamespace { + async fn sync_l2_block( + &self, + block_number: MiniblockNumber, + include_transactions: bool, + ) -> RpcResult> { + self.sync_l2_block_impl(block_number, include_transactions) + .await + .map_err(into_jsrpc_error) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs index 958c4ccd8314..290c1c770ccd 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/eth.rs @@ -1,5 +1,3 @@ -use crate::{api_server::web3::namespaces::eth::EthNamespace, l1_gas_price::L1GasPriceProvider}; - use zksync_types::{ api::{ Block, BlockId, BlockIdVariant, BlockNumber, Log, Transaction, TransactionId, @@ -9,197 +7,224 @@ use zksync_types::{ web3::types::{Index, SyncState}, Address, Bytes, H256, U256, U64, }; - use zksync_web3_decl::{ - jsonrpsee::{core::RpcResult, types::error::CallError}, + jsonrpsee::core::{async_trait, RpcResult}, namespaces::eth::EthNamespaceServer, types::{Filter, FilterChanges}, }; +use crate::{ + api_server::web3::{backend_jsonrpsee::into_jsrpc_error, EthNamespace}, + l1_gas_price::L1GasPriceProvider, +}; + +#[async_trait] impl EthNamespaceServer for EthNamespace { - fn get_block_number(&self) -> RpcResult { - self.get_block_number_impl() - .map_err(|err| CallError::from_std_error(err).into()) + async fn get_block_number(&self) -> RpcResult { + self.get_block_number_impl().await.map_err(into_jsrpc_error) } - fn chain_id(&self) -> RpcResult { + async fn chain_id(&self) -> RpcResult { Ok(self.chain_id_impl()) } - fn call(&self, req: CallRequest, block: Option) -> RpcResult { + async fn call(&self, req: CallRequest, block: Option) -> RpcResult { self.call_impl(req, block.map(Into::into)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn estimate_gas(&self, req: CallRequest, block: Option) -> RpcResult { + async fn estimate_gas(&self, req: CallRequest, block: Option) -> RpcResult { self.estimate_gas_impl(req, block) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn gas_price(&self) -> RpcResult { - self.gas_price_impl() - .map_err(|err| CallError::from_std_error(err).into()) + async fn gas_price(&self) -> RpcResult { + self.gas_price_impl().map_err(into_jsrpc_error) } - fn new_filter(&self, filter: Filter) -> RpcResult { - self.new_filter_impl(filter) - .map_err(|err| CallError::from_std_error(err).into()) + async fn new_filter(&self, filter: Filter) -> RpcResult { + self.new_filter_impl(filter).await.map_err(into_jsrpc_error) } - fn new_block_filter(&self) -> RpcResult { - 
self.new_block_filter_impl() - .map_err(|err| CallError::from_std_error(err).into()) + async fn new_block_filter(&self) -> RpcResult { + self.new_block_filter_impl().await.map_err(into_jsrpc_error) } - fn uninstall_filter(&self, idx: U256) -> RpcResult { - Ok(self.uninstall_filter_impl(idx)) + async fn uninstall_filter(&self, idx: U256) -> RpcResult { + Ok(self.uninstall_filter_impl(idx).await) } - fn new_pending_transaction_filter(&self) -> RpcResult { - Ok(self.new_pending_transaction_filter_impl()) + async fn new_pending_transaction_filter(&self) -> RpcResult { + Ok(self.new_pending_transaction_filter_impl().await) } - fn get_logs(&self, filter: Filter) -> RpcResult> { - self.get_logs_impl(filter) - .map_err(|err| CallError::from_std_error(err).into()) + async fn get_logs(&self, filter: Filter) -> RpcResult> { + self.get_logs_impl(filter).await.map_err(into_jsrpc_error) } - fn get_filter_logs(&self, filter_index: U256) -> RpcResult { + async fn get_filter_logs(&self, filter_index: U256) -> RpcResult { self.get_filter_logs_impl(filter_index) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_filter_changes(&self, filter_index: U256) -> RpcResult { + async fn get_filter_changes(&self, filter_index: U256) -> RpcResult { self.get_filter_changes_impl(filter_index) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_balance(&self, address: Address, block: Option) -> RpcResult { + async fn get_balance( + &self, + address: Address, + block: Option, + ) -> RpcResult { self.get_balance_impl(address, block.map(Into::into)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_block_by_number( + async fn get_block_by_number( &self, block_number: BlockNumber, full_transactions: bool, ) -> RpcResult>> { self.get_block_impl(BlockId::Number(block_number), full_transactions) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_block_by_hash( + async fn get_block_by_hash( &self, hash: H256, full_transactions: bool, ) -> RpcResult>> { self.get_block_impl(BlockId::Hash(hash), full_transactions) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_block_transaction_count_by_number( + async fn get_block_transaction_count_by_number( &self, block_number: BlockNumber, ) -> RpcResult> { self.get_block_transaction_count_impl(BlockId::Number(block_number)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_block_transaction_count_by_hash(&self, block_hash: H256) -> RpcResult> { + async fn get_block_transaction_count_by_hash( + &self, + block_hash: H256, + ) -> RpcResult> { self.get_block_transaction_count_impl(BlockId::Hash(block_hash)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_code(&self, address: Address, block: Option) -> RpcResult { + async fn get_code(&self, address: Address, block: Option) -> RpcResult { self.get_code_impl(address, block.map(Into::into)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_storage_at( + async fn get_storage_at( &self, address: Address, idx: U256, block: Option, ) -> RpcResult { self.get_storage_at_impl(address, idx, block.map(Into::into)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn 
get_transaction_count( + async fn get_transaction_count( &self, address: Address, block: Option, ) -> RpcResult { self.get_transaction_count_impl(address, block.map(Into::into)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_transaction_by_hash(&self, hash: H256) -> RpcResult> { + async fn get_transaction_by_hash(&self, hash: H256) -> RpcResult> { self.get_transaction_impl(TransactionId::Hash(hash)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_transaction_by_block_hash_and_index( + async fn get_transaction_by_block_hash_and_index( &self, block_hash: H256, index: Index, ) -> RpcResult> { self.get_transaction_impl(TransactionId::Block(BlockId::Hash(block_hash), index)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_transaction_by_block_number_and_index( + async fn get_transaction_by_block_number_and_index( &self, block_number: BlockNumber, index: Index, ) -> RpcResult> { self.get_transaction_impl(TransactionId::Block(BlockId::Number(block_number), index)) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_transaction_receipt(&self, hash: H256) -> RpcResult> { + async fn get_transaction_receipt(&self, hash: H256) -> RpcResult> { self.get_transaction_receipt_impl(hash) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn protocol_version(&self) -> RpcResult { + async fn protocol_version(&self) -> RpcResult { Ok(self.protocol_version()) } - fn send_raw_transaction(&self, tx_bytes: Bytes) -> RpcResult { + async fn send_raw_transaction(&self, tx_bytes: Bytes) -> RpcResult { self.send_raw_transaction_impl(tx_bytes) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn syncing(&self) -> RpcResult { + async fn syncing(&self) -> RpcResult { Ok(self.syncing_impl()) } - fn accounts(&self) -> RpcResult> { + async fn accounts(&self) -> RpcResult> { Ok(self.accounts_impl()) } - fn coinbase(&self) -> RpcResult
<Address> {
+    async fn coinbase(&self) -> RpcResult<Address>
{ Ok(self.coinbase_impl()) } - fn compilers(&self) -> RpcResult> { + async fn compilers(&self) -> RpcResult> { Ok(self.compilers_impl()) } - fn hashrate(&self) -> RpcResult { + async fn hashrate(&self) -> RpcResult { Ok(self.hashrate_impl()) } - fn get_uncle_count_by_block_hash(&self, hash: H256) -> RpcResult> { + async fn get_uncle_count_by_block_hash(&self, hash: H256) -> RpcResult> { Ok(self.uncle_count_impl(BlockId::Hash(hash))) } - fn get_uncle_count_by_block_number(&self, number: BlockNumber) -> RpcResult> { + async fn get_uncle_count_by_block_number( + &self, + number: BlockNumber, + ) -> RpcResult> { Ok(self.uncle_count_impl(BlockId::Number(number))) } - fn mining(&self) -> RpcResult { + async fn mining(&self) -> RpcResult { Ok(self.mining_impl()) } } diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs index 0e9ffad42953..2551b90e824e 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs @@ -1,3 +1,5 @@ +pub mod debug; +pub mod en; pub mod eth; pub mod eth_subscribe; pub mod net; diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/net.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/net.rs index a949b41e9403..8b3743ac483a 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/net.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/net.rs @@ -1,7 +1,8 @@ -use crate::api_server::web3::namespaces::net::NetNamespace; use zksync_types::U256; use zksync_web3_decl::{jsonrpsee::core::RpcResult, namespaces::net::NetNamespaceServer}; +use crate::api_server::web3::NetNamespace; + impl NetNamespaceServer for NetNamespace { fn version(&self) -> RpcResult { Ok(self.version_impl()) diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/web3.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/web3.rs index 0d45cf47c61d..8de5cd3fe2c9 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/web3.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/web3.rs @@ -1,6 +1,7 @@ -use crate::api_server::web3::namespaces::web3::Web3Namespace; use zksync_web3_decl::{jsonrpsee::core::RpcResult, namespaces::web3::Web3NamespaceServer}; +use crate::api_server::web3::Web3Namespace; + impl Web3NamespaceServer for Web3Namespace { fn client_version(&self) -> RpcResult { Ok(self.client_version_impl()) diff --git a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index a85b7f29c074..eef2575c21b4 100644 --- a/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -1,82 +1,75 @@ -use crate::{api_server::web3::namespaces::zks::ZksNamespace, l1_gas_price::L1GasPriceProvider}; use bigdecimal::BigDecimal; + use std::collections::HashMap; + use zksync_types::{ api::{BridgeAddresses, L2ToL1LogProof, TransactionDetails, U64}, explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, transaction_request::CallRequest, - vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, Address, L1BatchNumber, MiniblockNumber, H256, U256, }; use zksync_web3_decl::{ - jsonrpsee::{core::RpcResult, 
types::error::CallError}, + jsonrpsee::core::{async_trait, RpcResult}, namespaces::zks::ZksNamespaceServer, types::Token, }; +use crate::{ + api_server::web3::{backend_jsonrpsee::into_jsrpc_error, ZksNamespace}, + l1_gas_price::L1GasPriceProvider, +}; + +#[async_trait] impl ZksNamespaceServer for ZksNamespace { - fn estimate_fee(&self, req: CallRequest) -> RpcResult { - self.estimate_fee_impl(req) - .map_err(|err| CallError::from_std_error(err).into()) + async fn estimate_fee(&self, req: CallRequest) -> RpcResult { + self.estimate_fee_impl(req).await.map_err(into_jsrpc_error) } - fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult { + async fn estimate_gas_l1_to_l2(&self, req: CallRequest) -> RpcResult { self.estimate_l1_to_l2_gas_impl(req) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_main_contract(&self) -> RpcResult
<Address> {
+    async fn get_main_contract(&self) -> RpcResult<Address>
{ Ok(self.get_main_contract_impl()) } - fn get_testnet_paymaster(&self) -> RpcResult> { + async fn get_testnet_paymaster(&self) -> RpcResult> { Ok(self.get_testnet_paymaster_impl()) } - fn get_bridge_contracts(&self) -> RpcResult { + async fn get_bridge_contracts(&self) -> RpcResult { Ok(self.get_bridge_contracts_impl()) } - fn l1_chain_id(&self) -> RpcResult { + async fn l1_chain_id(&self) -> RpcResult { Ok(self.l1_chain_id_impl()) } - fn get_confirmed_tokens(&self, from: u32, limit: u8) -> RpcResult> { + async fn get_confirmed_tokens(&self, from: u32, limit: u8) -> RpcResult> { self.get_confirmed_tokens_impl(from, limit) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_token_price(&self, token_address: Address) -> RpcResult { + async fn get_token_price(&self, token_address: Address) -> RpcResult { self.get_token_price_impl(token_address) - .map_err(|err| CallError::from_std_error(err).into()) - } - - fn set_contract_debug_info( - &self, - address: Address, - info: ContractSourceDebugInfo, - ) -> RpcResult { - Ok(self.set_contract_debug_info_impl(address, info)) + .await + .map_err(into_jsrpc_error) } - fn get_contract_debug_info( + async fn get_all_account_balances( &self, address: Address, - ) -> RpcResult> { - Ok(self.get_contract_debug_info_impl(address)) - } - - fn get_transaction_trace(&self, hash: H256) -> RpcResult> { - Ok(self.get_transaction_trace_impl(hash)) - } - - fn get_all_account_balances(&self, address: Address) -> RpcResult> { + ) -> RpcResult> { self.get_all_account_balances_impl(address) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_l2_to_l1_msg_proof( + async fn get_l2_to_l1_msg_proof( &self, block: MiniblockNumber, sender: Address, @@ -84,59 +77,70 @@ impl ZksNamespaceServer for ZksNa l2_log_position: Option, ) -> RpcResult> { self.get_l2_to_l1_msg_proof_impl(block, sender, msg, l2_log_position) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_l2_to_l1_log_proof( + async fn get_l2_to_l1_log_proof( &self, tx_hash: H256, index: Option, ) -> RpcResult> { self.get_l2_to_l1_log_proof_impl(tx_hash, index) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_l1_batch_number(&self) -> RpcResult { + async fn get_l1_batch_number(&self) -> RpcResult { self.get_l1_batch_number_impl() - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult> { + async fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult> { self.get_miniblock_range_impl(batch) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_block_details(&self, block_number: MiniblockNumber) -> RpcResult> { + async fn get_block_details( + &self, + block_number: MiniblockNumber, + ) -> RpcResult> { self.get_block_details_impl(block_number) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_transaction_details(&self, hash: H256) -> RpcResult> { + async fn get_transaction_details(&self, hash: H256) -> RpcResult> { self.get_transaction_details_impl(hash) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_raw_block_transactions( + async fn get_raw_block_transactions( &self, block_number: MiniblockNumber, ) -> RpcResult> { 
self.get_raw_block_transactions_impl(block_number) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_l1_batch_details( + async fn get_l1_batch_details( &self, batch_number: L1BatchNumber, ) -> RpcResult> { self.get_l1_batch_details_impl(batch_number) - .map_err(|err| CallError::from_std_error(err).into()) + .await + .map_err(into_jsrpc_error) } - fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult>> { - Ok(self.get_bytecode_by_hash_impl(hash)) + async fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult>> { + Ok(self.get_bytecode_by_hash_impl(hash).await) } - fn get_l1_gas_price(&self) -> RpcResult { + async fn get_l1_gas_price(&self) -> RpcResult { Ok(self.get_l1_gas_price_impl()) } } diff --git a/core/bin/zksync_core/src/api_server/web3/mod.rs b/core/bin/zksync_core/src/api_server/web3/mod.rs index ed895234434a..5c46cfbbda8b 100644 --- a/core/bin/zksync_core/src/api_server/web3/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/mod.rs @@ -1,46 +1,56 @@ // Built-in uses use std::collections::HashMap; use std::net::SocketAddr; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; // External uses use futures::channel::oneshot; use futures::FutureExt; use jsonrpc_core::IoHandler; +use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; -use tokio::sync::watch; +use tokio::sync::{watch, RwLock}; +use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; // Workspace uses use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner}; -use zksync_types::{Address, H256}; +use zksync_types::{api, Address, MiniblockNumber, H256}; use zksync_web3_decl::{ + error::Web3Error, jsonrpsee::{server::ServerBuilder, RpcModule}, - namespaces::{EthNamespaceServer, NetNamespaceServer, Web3NamespaceServer, ZksNamespaceServer}, + namespaces::{ + DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, NetNamespaceServer, + Web3NamespaceServer, ZksNamespaceServer, + }, }; +use self::state::InternalApiConfig; use crate::l1_gas_price::L1GasPriceProvider; use crate::sync_layer::SyncState; -use self::state::InternalApiConfig; - // Local uses use super::tx_sender::TxSender; +use crate::api_server::web3::api_health_check::ApiHealthCheck; use backend_jsonrpc::{ + error::internal_error, namespaces::{ - debug::DebugNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, web3::Web3NamespaceT, - zks::ZksNamespaceT, + debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, + web3::Web3NamespaceT, zks::ZksNamespaceT, }, pub_sub::Web3PubSub, }; use namespaces::{ - DebugNamespace, EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, ZksNamespace, + DebugNamespace, EnNamespace, EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, + ZksNamespace, }; use pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; use state::{Filters, RpcState}; +use zksync_health_check::CheckHealthStatus; +pub mod api_health_check; pub mod backend_jsonrpc; pub mod backend_jsonrpsee; pub mod namespaces; @@ -70,6 +80,7 @@ pub struct ApiBuilder { subscriptions_limit: Option, sync_state: Option, threads: Option, + vm_concurrency_limit: Option, polling_interval: Option, accounts: HashMap, debug_namespace_config: Option<(BaseSystemContractsHashes, u64, Option)>, @@ -86,6 +97,7 @@ impl ApiBuilder { filters_limit: None, subscriptions_limit: None, threads: None, + vm_concurrency_limit: 
None, polling_interval: None, debug_namespace_config: None, accounts: Default::default(), @@ -103,6 +115,7 @@ impl ApiBuilder { filters_limit: None, subscriptions_limit: None, threads: None, + vm_concurrency_limit: None, polling_interval: None, debug_namespace_config: None, accounts: Default::default(), @@ -150,6 +163,11 @@ impl ApiBuilder { self } + pub fn with_vm_concurrency_limit(mut self, vm_concurrency_limit: usize) -> Self { + self.vm_concurrency_limit = Some(vm_concurrency_limit); + self + } + pub fn enable_debug_namespace( mut self, base_system_contract_hashes: BaseSystemContractsHashes, @@ -198,7 +216,7 @@ impl ApiBuilder { } } - fn build_rpc_module(&self) -> RpcModule> { + async fn build_rpc_module(&self) -> RpcModule> { let zksync_network_id = self.config.l2_chain_id; let rpc_app = self.build_rpc_state(); @@ -206,12 +224,8 @@ impl ApiBuilder { let eth = EthNamespace::new(rpc_app.clone()); let net = NetNamespace::new(zksync_network_id); let web3 = Web3Namespace; - let zks = ZksNamespace::new(rpc_app); - - assert!( - self.debug_namespace_config.is_none(), - "Debug namespace is not supported with jsonrpsee_backend" - ); + let zks = ZksNamespace::new(rpc_app.clone()); + let en = EnNamespace::new(rpc_app.clone()); // Collect all the methods into a single RPC module. let mut rpc: RpcModule<_> = eth.into_rpc(); @@ -221,18 +235,33 @@ impl ApiBuilder { .expect("Can't merge web3 namespace"); rpc.merge(zks.into_rpc()) .expect("Can't merge zks namespace"); - + rpc.merge(en.into_rpc()).expect("Can't merge en namespace"); + + if let Some((hashes, fair_l2_gas_price, cache_misses_limit)) = self.debug_namespace_config { + rpc.merge( + DebugNamespace::new( + rpc_app.connection_pool, + hashes, + fair_l2_gas_price, + cache_misses_limit, + rpc_app.tx_sender.0.vm_concurrency_limiter.clone(), + rpc_app.tx_sender.0.factory_deps_cache.clone(), + ) + .await + .into_rpc(), + ) + .expect("Can't merge debug namespace"); + } rpc } - pub fn build( + pub async fn build( mut self, stop_receiver: watch::Receiver, - ) -> Vec> { + ) -> (Vec>, ApiHealthCheck) { if self.filters_limit.is_none() { vlog::warn!("Filters limit is not set - unlimited filters are allowed"); } - match (&self.transport, self.subscriptions_limit) { (Some(ApiTransport::WebSocket(_)), None) => { vlog::warn!( @@ -249,28 +278,65 @@ impl ApiBuilder { match (self.backend, self.transport.take()) { (ApiBackend::Jsonrpc, Some(ApiTransport::Http(addr))) => { - vec![self.build_jsonrpc_http(addr)] + let (api_health_check, status_sender) = self.create_health_check(); + ( + vec![ + self.build_jsonrpc_http(addr, stop_receiver, status_sender) + .await, + ], + api_health_check, + ) } (ApiBackend::Jsonrpc, Some(ApiTransport::WebSocket(addr))) => { - self.build_jsonrpc_ws(addr, stop_receiver) + let (api_health_check, status_sender) = self.create_health_check(); + ( + self.build_jsonrpc_ws(addr, stop_receiver, status_sender), + api_health_check, + ) } (ApiBackend::Jsonrpsee, Some(ApiTransport::Http(addr))) => { - vec![self.build_jsonrpsee_http(addr)] + let (api_health_check, status_sender) = self.create_health_check(); + ( + vec![ + self.build_jsonrpsee_http(addr, stop_receiver, status_sender) + .await, + ], + api_health_check, + ) } (ApiBackend::Jsonrpsee, Some(ApiTransport::WebSocket(addr))) => { - vec![self.build_jsonrpsee_ws(addr)] + let (api_health_check, status_sender) = self.create_health_check(); + ( + vec![ + self.build_jsonrpsee_ws(addr, stop_receiver, status_sender) + .await, + ], + api_health_check, + ) } (_, None) => panic!("ApiTransport is not 
specified"), } } - fn build_jsonrpc_http(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { + fn create_health_check(&self) -> (ApiHealthCheck, watch::Sender) { + let (status_sender, receiver) = + watch::channel(CheckHealthStatus::NotReady("Api is not ready".into())); + (ApiHealthCheck::new(receiver), status_sender) + } + + async fn build_jsonrpc_http( + self, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + api_health_check: watch::Sender, + ) -> tokio::task::JoinHandle<()> { let io_handler = { let zksync_network_id = self.config.l2_chain_id; let rpc_state = self.build_rpc_state(); let mut io = IoHandler::new(); io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); + io.extend_with(EnNamespace::new(rpc_state.clone()).to_delegate()); io.extend_with(Web3Namespace.to_delegate()); io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); if let Some((hashes, fair_l2_gas_price, cache_misses_limit)) = @@ -282,7 +348,10 @@ impl ApiBuilder { hashes, fair_l2_gas_price, cache_misses_limit, + rpc_state.tx_sender.0.vm_concurrency_limiter.clone(), + rpc_state.tx_sender.0.factory_deps_cache.clone(), ) + .await .to_delegate(), ); } @@ -304,15 +373,31 @@ impl ApiBuilder { .start_http(&addr) .unwrap(); + let close_handler = server.close_handle(); + std::thread::spawn(move || { + let stop_signal = futures::executor::block_on(stop_receiver.changed()); + if stop_signal.is_ok() { + vlog::info!("Stop signal received, web3 HTTP JSON RPC API is shutting down"); + close_handler.close(); + } + }); + api_health_check.send(CheckHealthStatus::Ready).unwrap(); server.wait(); + runtime.shutdown_timeout(Duration::from_secs(10)); + let _ = sender; }); tokio::spawn(recv.map(drop)) } - fn build_jsonrpsee_http(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { - let rpc = self.build_rpc_module(); + async fn build_jsonrpsee_http( + self, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + api_health_check: watch::Sender, + ) -> tokio::task::JoinHandle<()> { + let rpc = self.build_rpc_module().await; // Start the server in a separate tokio runtime from a dedicated thread. let (sender, recv) = oneshot::channel::<()>(); @@ -323,10 +408,30 @@ impl ApiBuilder { .build() .unwrap(); + // Setup CORS. + let cors = CorsLayer::new() + // Allow `POST` when accessing the resource + .allow_methods([hyper::Method::POST]) + // Allow requests from any origin + .allow_origin(tower_http::cors::Any) + .allow_headers([hyper::header::CONTENT_TYPE]); + + // Setup metrics for the number of in-flight txs. + let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); + runtime.spawn(counter.run_emitter(Duration::from_secs(10), |count| async move { + metrics::histogram!("api.web3.in_flight_requests", count as f64, "scheme" => "http"); + })); + + // Prepare middleware. 
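+        // `ServiceBuilder` applies layers outside-in, so requests are counted as
+        // in-flight before the CORS policy is checked.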
+ let middleware = tower::ServiceBuilder::new() + .layer(in_flight_requests_layer) + .layer(cors); + runtime.block_on(async move { let server = ServerBuilder::default() .http_only() .max_connections(5000) + .set_middleware(middleware) .build(addr) .await .expect("Can't start the HTTP JSON RPC server"); @@ -334,9 +439,21 @@ impl ApiBuilder { let server_handle = server .start(rpc) .expect("Failed to start HTTP JSON RPC application"); - server_handle.stopped().await - }); + let close_handle = server_handle.clone(); + tokio::spawn(async move { + if stop_receiver.changed().await.is_ok() { + vlog::info!( + "Stop signal received, web3 HTTP JSON RPC API is shutting down" + ); + close_handle.stop().unwrap(); + } + }); + api_health_check.send(CheckHealthStatus::Ready).unwrap(); + server_handle.stopped().await; + vlog::info!("HTTP JSON RPC API stopped"); + }); + runtime.shutdown_timeout(Duration::from_secs(10)); sender.send(()).unwrap(); }); @@ -344,12 +461,17 @@ impl ApiBuilder { tokio::spawn(recv.map(drop)) } - fn build_jsonrpsee_ws(self, addr: SocketAddr) -> tokio::task::JoinHandle<()> { + async fn build_jsonrpsee_ws( + self, + addr: SocketAddr, + mut stop_receiver: watch::Receiver, + api_health_check: watch::Sender, + ) -> tokio::task::JoinHandle<()> { vlog::warn!( "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead" ); - let rpc = self.build_rpc_module(); + let rpc = self.build_rpc_module().await; // Start the server in a separate tokio runtime from a dedicated thread. let (sender, recv) = oneshot::channel::<()>(); @@ -370,9 +492,18 @@ impl ApiBuilder { let server_handle = server .start(rpc) .expect("Failed to start WS JSON RPC application"); - server_handle.stopped().await - }); + api_health_check.send(CheckHealthStatus::Ready).unwrap(); + let close_handle = server_handle.clone(); + tokio::spawn(async move { + if stop_receiver.changed().await.is_ok() { + vlog::info!("Stop signal received, web3 WS JSON RPC API is shutting down"); + close_handle.stop().unwrap(); + } + }); + server_handle.stopped().await; + }); + runtime.shutdown_timeout(Duration::from_secs(10)); sender.send(()).unwrap(); }); @@ -384,8 +515,15 @@ impl ApiBuilder { self, addr: SocketAddr, mut stop_receiver: watch::Receiver, + api_health_check: watch::Sender, ) -> Vec> { - let pub_sub = EthSubscribe::default(); + let jsonrpc_runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(self.threads.unwrap()) + .build() + .unwrap(); + + let pub_sub = EthSubscribe::new(jsonrpc_runtime.handle().clone()); let polling_interval = self.polling_interval.expect("Polling interval is not set"); let mut notify_handles = vec![ @@ -416,34 +554,40 @@ impl ApiBuilder { let mut io = PubSubHandler::default(); io.extend_with(pub_sub.to_delegate()); io.extend_with(EthNamespace::new(rpc_state.clone()).to_delegate()); - io.extend_with(ZksNamespace::new(rpc_state).to_delegate()); + io.extend_with(ZksNamespace::new(rpc_state.clone()).to_delegate()); + io.extend_with(EnNamespace::new(rpc_state).to_delegate()); io.extend_with(Web3Namespace.to_delegate()); io.extend_with(NetNamespace::new(zksync_network_id).to_delegate()); io }; - let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( - io, - |context: &jsonrpc_ws_server::RequestContext| { - Arc::new(jsonrpc_pubsub::Session::new(context.sender())) - }, - ) - .max_connections(self.subscriptions_limit.unwrap_or(usize::MAX)) - .session_stats(TrackOpenWsConnections) - .start(&addr) - .unwrap(); - let close_handler = server.close_handle(); 
std::thread::spawn(move || { + let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( + io, + |context: &jsonrpc_ws_server::RequestContext| { + Arc::new(jsonrpc_pubsub::Session::new(context.sender())) + }, + ) + .event_loop_executor(jsonrpc_runtime.handle().clone()) + .max_connections(self.subscriptions_limit.unwrap_or(usize::MAX)) + .session_stats(TrackOpenWsConnections) + .start(&addr) + .unwrap(); + let close_handler = server.close_handle(); + + std::thread::spawn(move || { + let stop_signal = futures::executor::block_on(stop_receiver.changed()); + if stop_signal.is_ok() { + close_handler.close(); + vlog::info!("Stop signal received, WS JSON RPC API is shutting down"); + } + }); + + api_health_check.send(CheckHealthStatus::Ready).unwrap(); server.wait().unwrap(); + jsonrpc_runtime.shutdown_timeout(Duration::from_secs(10)); let _ = sender; }); - std::thread::spawn(move || { - let stop_signal = futures::executor::block_on(stop_receiver.changed()); - if stop_signal.is_ok() { - close_handler.close(); - vlog::info!("Stop signal received, WS JSON RPC API is shutting down"); - } - }); notify_handles.push(tokio::spawn(recv.map(drop))); notify_handles @@ -461,3 +605,14 @@ impl jsonrpc_ws_server::SessionStats for TrackOpenWsConnections { metrics::decrement_gauge!("api.ws.open_sessions", 1.0); } } + +async fn resolve_block( + connection: &mut StorageProcessor<'_>, + block: api::BlockId, + method_name: &'static str, +) -> Result { + let result = connection.blocks_web3_dal().resolve_block_id(block).await; + result + .map_err(|err| internal_error(method_name, err))? + .ok_or(Web3Error::NoBlock) +} diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs index d3c09e85e2e9..bd5cdf2c3ee2 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -1,37 +1,52 @@ -use crate::api_server::execution_sandbox::execute_tx_eth_call; -use crate::api_server::web3::backend_jsonrpc::namespaces::debug::TracerConfig; -use std::time::Instant; +use std::{sync::Arc, time::Instant}; + use zksync_contracts::{ BaseSystemContracts, BaseSystemContractsHashes, PLAYGROUND_BLOCK_BOOTLOADER_CODE, }; use zksync_dal::ConnectionPool; -use zksync_types::api::{BlockId, BlockNumber, DebugCall, ResultDebugCall}; -use zksync_types::transaction_request::{l2_tx_from_call_req, CallRequest}; -use zksync_types::vm_trace::{Call, VmTrace}; -use zksync_types::{H256, USED_BOOTLOADER_MEMORY_BYTES}; +use zksync_state::FactoryDepsCache; +use zksync_types::{ + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, + transaction_request::{l2_tx_from_call_req, CallRequest}, + vm_trace::{Call, VmTrace}, + AccountTreeId, H256, USED_BOOTLOADER_MEMORY_BYTES, +}; use zksync_web3_decl::error::Web3Error; +use crate::api_server::{ + execution_sandbox::{execute_tx_eth_call, BlockArgs, TxSharedArgs, VmConcurrencyLimiter}, + tx_sender::SubmitTxError, + web3::{backend_jsonrpc::error::internal_error, resolve_block}, +}; + #[derive(Debug, Clone)] pub struct DebugNamespace { - pub connection_pool: ConnectionPool, - pub fair_l2_gas_price: u64, - pub base_system_contracts: BaseSystemContracts, - pub vm_execution_cache_misses_limit: Option, + connection_pool: ConnectionPool, + fair_l2_gas_price: u64, + base_system_contracts: BaseSystemContracts, + vm_execution_cache_misses_limit: Option, + vm_concurrency_limiter: Arc, + factory_deps_cache: FactoryDepsCache, } impl DebugNamespace 
{ - pub fn new( + pub async fn new( connection_pool: ConnectionPool, base_system_contract_hashes: BaseSystemContractsHashes, fair_l2_gas_price: u64, vm_execution_cache_misses_limit: Option, + vm_concurrency_limiter: Arc, + factory_deps_cache: FactoryDepsCache, ) -> Self { - let mut storage = connection_pool.access_storage_blocking(); + let mut storage = connection_pool.access_storage_tagged("api").await; - let mut base_system_contracts = storage.storage_dal().get_base_system_contracts( - base_system_contract_hashes.bootloader, - base_system_contract_hashes.default_aa, - ); + let mut base_system_contracts = storage + .storage_dal() + .get_base_system_contracts( + base_system_contract_hashes.bootloader, + base_system_contract_hashes.default_aa, + ) + .await; drop(storage); @@ -41,23 +56,29 @@ impl DebugNamespace { fair_l2_gas_price, base_system_contracts, vm_execution_cache_misses_limit, + vm_concurrency_limiter, + factory_deps_cache, } } #[tracing::instrument(skip(self))] - pub fn debug_trace_block_impl( + pub async fn debug_trace_block_impl( &self, block: BlockId, options: Option, ) -> Result, Web3Error> { + const METHOD_NAME: &str = "debug_trace_block"; + let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); - let call_trace = self - .connection_pool - .access_storage_blocking() + let mut connection = self.connection_pool.access_storage_tagged("api").await; + let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; + let call_trace = connection .blocks_web3_dal() - .get_trace_for_miniblock(block)?; + .get_trace_for_miniblock(block_number) + .await; + Ok(call_trace .into_iter() .map(|call_trace| { @@ -71,7 +92,7 @@ impl DebugNamespace { } #[tracing::instrument(skip(self))] - pub fn debug_trace_transaction_impl( + pub async fn debug_trace_transaction_impl( &self, tx_hash: H256, options: Option, @@ -81,9 +102,11 @@ impl DebugNamespace { .unwrap_or(false); let call_trace = self .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_dal() - .get_call_trace(tx_hash); + .get_call_trace(tx_hash) + .await; call_trace.map(|call_trace| { let mut result: DebugCall = call_trace.into(); if only_top_call { @@ -94,7 +117,7 @@ impl DebugNamespace { } #[tracing::instrument(skip(self, request, block))] - pub fn debug_trace_call_impl( + pub async fn debug_trace_call_impl( &self, request: CallRequest, block: Option, @@ -104,22 +127,35 @@ impl DebugNamespace { let only_top_call = options .map(|options| options.tracer_config.only_top_call) .unwrap_or(false); + let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let mut connection = self.connection_pool.access_storage_tagged("api").await; + let block_args = BlockArgs::new(&mut connection, block) + .await + .map_err(|err| internal_error("debug_trace_call", err))? 
+            .ok_or(Web3Error::NoBlock)?;
+        drop(connection);
+
         let tx = l2_tx_from_call_req(request, USED_BOOTLOADER_MEMORY_BYTES)?;
-        let enforced_base_fee = Some(tx.common_data.fee.max_fee_per_gas.as_u64());
+        let shared_args = self.shared_args();
+        let vm_permit = self.vm_concurrency_limiter.acquire().await;

         // We don't need a proper trace if we only need the top-level call.
         let result = execute_tx_eth_call(
-            &self.connection_pool,
+            &vm_permit,
+            shared_args,
+            self.connection_pool.clone(),
             tx.clone(),
-            block,
-            100000,
-            self.fair_l2_gas_price,
-            enforced_base_fee,
-            &self.base_system_contracts,
+            block_args,
             self.vm_execution_cache_misses_limit,
             !only_top_call,
-        )?;
+        )
+        .await
+        .map_err(|err| {
+            let submit_tx_error = SubmitTxError::from(err);
+            Web3Error::SubmitTransactionError(submit_tx_error.to_string(), submit_tx_error.data())
+        })?;
+        drop(vm_permit); // Unblock other VMs waiting to enter.

         let (output, revert_reason) = match result.revert_reason {
             Some(result) => (vec![], Some(result.revert_reason.to_string())),
@@ -127,11 +163,8 @@ impl DebugNamespace {
                 result
                     .return_data
                     .into_iter()
-                    .flat_map(|val| {
-                        let bytes: [u8; 32] = val.into();
-                        bytes.to_vec()
-                    })
-                    .collect::<Vec<u8>>(),
+                    .flat_map(<[u8; 32]>::from)
+                    .collect(),
                 None,
             ),
         };
@@ -152,4 +185,14 @@ impl DebugNamespace {
         metrics::histogram!("api.web3.call", start.elapsed(), "method" => "debug_trace_call");
         Ok(call.into())
     }
+
+    fn shared_args(&self) -> TxSharedArgs {
+        TxSharedArgs {
+            operator_account: AccountTreeId::default(),
+            l1_gas_price: 100_000,
+            fair_l2_gas_price: self.fair_l2_gas_price,
+            base_system_contracts: self.base_system_contracts.clone(),
+            factory_deps_cache: self.factory_deps_cache.clone(),
+        }
+    }
 }
diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/en.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/en.rs
new file mode 100644
index 000000000000..c9445f25e993
--- /dev/null
+++ b/core/bin/zksync_core/src/api_server/web3/namespaces/en.rs
@@ -0,0 +1,50 @@
+use zksync_types::{api::en::SyncBlock, MiniblockNumber};
+use zksync_web3_decl::error::Web3Error;
+
+use crate::{
+    api_server::{web3::backend_jsonrpc::error::internal_error, web3::state::RpcState},
+    l1_gas_price::L1GasPriceProvider,
+};
+
+/// Namespace for methods unique to the External Node.
+/// Its main use case is EN synchronization.
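+/// It currently exposes a single method, `en_syncL2Block`, backed by `sync_dal`.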
+#[derive(Debug)] +pub struct EnNamespace { + pub state: RpcState, +} + +impl Clone for EnNamespace { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + } + } +} + +impl EnNamespace { + pub fn new(state: RpcState) -> Self { + Self { state } + } + + #[tracing::instrument(skip(self))] + pub async fn sync_l2_block_impl( + &self, + block_number: MiniblockNumber, + include_transactions: bool, + ) -> Result, Web3Error> { + let mut storage = self + .state + .connection_pool + .access_storage_tagged("api") + .await; + storage + .sync_dal() + .sync_block( + block_number, + self.state.tx_sender.0.sender_config.fee_account_addr, + include_transactions, + ) + .await + .map_err(|err| internal_error("en_syncL2Block", err)) + } +} diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs index 630be1e25c08..10c60dba0cce 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,7 +1,7 @@ -use std::time::Instant; - use itertools::Itertools; +use std::time::Instant; + use zksync_types::{ api::{ BlockId, BlockNumber, GetLogsFilter, Transaction, TransactionId, TransactionReceipt, @@ -14,25 +14,11 @@ use zksync_types::{ AccountTreeId, Bytes, MiniblockNumber, StorageKey, H256, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, U256, }; - +use zksync_utils::u256_to_h256; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64}, }; - -use crate::{ - api_server::{ - execution_sandbox::execute_tx_eth_call, web3::backend_jsonrpc::error::internal_error, - web3::state::RpcState, - }, - l1_gas_price::L1GasPriceProvider, -}; - -use zksync_utils::u256_to_h256; - -#[cfg(feature = "openzeppelin_tests")] -use zksync_utils::bytecode::hash_bytecode; - #[cfg(feature = "openzeppelin_tests")] use { zksync_eth_signer::EthereumSigner, @@ -41,14 +27,31 @@ use { transaction_request::Eip712Meta, web3::contract::tokens::Tokenizable, Eip712Domain, EIP_712_TX_TYPE, }, + zksync_utils::bytecode::hash_bytecode, +}; + +use crate::{ + api_server::{ + execution_sandbox::BlockArgs, + web3::{backend_jsonrpc::error::internal_error, resolve_block, state::RpcState}, + }, + l1_gas_price::L1GasPriceProvider, }; pub const EVENT_TOPIC_NUMBER_LIMIT: usize = 4; pub const PROTOCOL_VERSION: &str = "zks/1"; -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct EthNamespace { - pub state: RpcState, + state: RpcState, +} + +impl Clone for EthNamespace { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + } + } } impl EthNamespace { @@ -57,25 +60,27 @@ impl EthNamespace { } #[tracing::instrument(skip(self))] - pub fn get_block_number_impl(&self) -> Result { - let start = Instant::now(); - let endpoint_name = "get_block_number"; + pub async fn get_block_number_impl(&self) -> Result { + const METHOD_NAME: &str = "get_block_number"; + let start = Instant::now(); let block_number = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_sealed_miniblock_number() + .await .map(|n| U64::from(n.0)) - .map_err(|err| internal_error(endpoint_name, err)); + .map_err(|err| internal_error(METHOD_NAME, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); block_number } #[tracing::instrument(skip(self, request, block))] - pub fn call_impl( + pub 
async fn call_impl( &self, request: CallRequest, block: Option, @@ -83,14 +88,24 @@ impl EthNamespace { let start = Instant::now(); let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let mut connection = self + .state + .connection_pool + .access_storage_tagged("api") + .await; + let block_args = BlockArgs::new(&mut connection, block) + .await + .map_err(|err| internal_error("eth_call", err))? + .ok_or(Web3Error::NoBlock)?; + drop(connection); let mut request_with_set_nonce = request.clone(); self.state - .set_nonce_for_call_request(&mut request_with_set_nonce)?; + .set_nonce_for_call_request(&mut request_with_set_nonce) + .await?; #[cfg(not(feature = "openzeppelin_tests"))] let tx = l2_tx_from_call_req(request, self.state.api_config.max_tx_size)?; - #[cfg(feature = "openzeppelin_tests")] let tx: L2Tx = self .convert_evm_like_deploy_requests(tx_req_from_call_req( @@ -99,38 +114,9 @@ impl EthNamespace { )?)? .try_into()?; - let enforced_base_fee = Some(tx.common_data.fee.max_fee_per_gas.as_u64()); - let result = execute_tx_eth_call( - &self.state.connection_pool, - tx, - block, - self.state - .tx_sender - .0 - .l1_gas_price_source - .estimate_effective_gas_price(), - self.state.tx_sender.0.sender_config.fair_l2_gas_price, - enforced_base_fee, - &self.state.tx_sender.0.playground_base_system_contracts, - self.state - .tx_sender - .0 - .sender_config - .vm_execution_cache_misses_limit, - false, - )?; - - let mut res_bytes = match result.revert_reason { - Some(result) => result.original_data, - None => result - .return_data - .into_iter() - .flat_map(|val| { - let bytes: [u8; 32] = val.into(); - bytes.to_vec() - }) - .collect::>(), - }; + let call_result = self.state.tx_sender.eth_call(block_args, tx).await; + let mut res_bytes = call_result + .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; if cfg!(feature = "openzeppelin_tests") && res_bytes.len() >= 100 @@ -144,7 +130,7 @@ impl EthNamespace { } #[tracing::instrument(skip(self, request, _block))] - pub fn estimate_gas_impl( + pub async fn estimate_gas_impl( &self, request: CallRequest, _block: Option, @@ -153,7 +139,8 @@ impl EthNamespace { let mut request_with_gas_per_pubdata_overridden = request; self.state - .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden)?; + .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden) + .await?; if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { if eip712_meta.gas_per_pubdata == U256::zero() { @@ -200,6 +187,7 @@ impl EthNamespace { .state .tx_sender .get_txs_fee_in_wei(tx.into(), scale_factor, acceptable_overestimation) + .await .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas"); @@ -208,49 +196,54 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn gas_price_impl(&self) -> Result { - let start = Instant::now(); - let endpoint_name = "gas_price"; + const METHOD_NAME: &str = "gas_price"; + let start = Instant::now(); let price = self.state.tx_sender.gas_price(); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); Ok(price.into()) } #[tracing::instrument(skip(self))] - pub fn get_balance_impl( + pub async fn get_balance_impl( &self, address: Address, block: Option, ) -> Result { - let start = Instant::now(); - let endpoint_name = 
"get_balance"; + const METHOD_NAME: &str = "get_balance"; - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); - let balance = self + let start = Instant::now(); + let mut connection = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await; + let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; + let balance = connection .storage_web3_dal() .standard_token_historical_balance( AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), AccountTreeId::new(address), - block, + block_number, ) - .map_err(|err| internal_error(endpoint_name, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - balance + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(balance) } #[tracing::instrument(skip(self, filter))] - pub fn get_logs_impl(&self, mut filter: Filter) -> Result, Web3Error> { + pub async fn get_logs_impl(&self, mut filter: Filter) -> Result, Web3Error> { let start = Instant::now(); - let (from_block, to_block) = self.state.resolve_filter_block_range(&filter)?; + self.state.resolve_filter_block_hash(&mut filter).await?; + let (from_block, to_block) = self.state.resolve_filter_block_range(&filter).await?; filter.to_block = Some(BlockNumber::Number(to_block.0.into())); let changes = self - .filter_changes(TypedFilter::Events(filter, from_block))? + .filter_changes(TypedFilter::Events(filter, from_block)) + .await? .0; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "get_logs"); @@ -260,39 +253,39 @@ impl EthNamespace { }) } - #[tracing::instrument(skip(self))] - pub fn get_filter_logs_impl(&self, idx: U256) -> Result { + // #[tracing::instrument(skip(self))] + pub async fn get_filter_logs_impl(&self, idx: U256) -> Result { let start = Instant::now(); - let filter = match self - .state - .installed_filters - .read() - .unwrap() - .get(idx) - .cloned() - { + // Note: We have to keep this as a separate variable, since otherwise the lock guard would exist + // for duration of the whole `match` block, and this guard is not `Send`. This would make the whole future + // not `Send`, since `match` has an `await` point. 
+ let maybe_filter = self.state.installed_filters.read().await.get(idx).cloned(); + let filter = match maybe_filter { Some(TypedFilter::Events(filter, _)) => { - let from_block = self.state.resolve_filter_block_number(filter.from_block)?; + let from_block = self + .state + .resolve_filter_block_number(filter.from_block) + .await?; TypedFilter::Events(filter, from_block) } _ => return Err(Web3Error::FilterNotFound), }; - let logs = self.filter_changes(filter)?.0; + let logs = self.filter_changes(filter).await?.0; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "get_filter_logs"); Ok(logs) } #[tracing::instrument(skip(self))] - pub fn get_block_impl( + pub async fn get_block_impl( &self, block: BlockId, full_transactions: bool, ) -> Result>, Web3Error> { let start = Instant::now(); - let endpoint_name = if full_transactions { + let method_name = if full_transactions { "get_block_with_txs" } else { "get_block" @@ -301,57 +294,63 @@ impl EthNamespace { let block = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_block_by_web3_block_id(block, full_transactions, self.state.api_config.l2_chain_id) - .map_err(|err| internal_error(endpoint_name, err)); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + .await + .map_err(|err| internal_error(method_name, err)); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => method_name); block } #[tracing::instrument(skip(self))] - pub fn get_block_transaction_count_impl( + pub async fn get_block_transaction_count_impl( &self, block: BlockId, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_block_transaction_count"; + const METHOD_NAME: &str = "get_block_transaction_count"; + let start = Instant::now(); let tx_count = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_block_tx_count(block) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); tx_count } #[tracing::instrument(skip(self))] - pub fn get_code_impl( + pub async fn get_code_impl( &self, address: Address, block: Option, ) -> Result { - let start = Instant::now(); - let endpoint_name = "get_code"; - - let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + const METHOD_NAME: &str = "get_code"; - let contract_code = self + let start = Instant::now(); + let mut connection = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await; + let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); + let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; + let contract_code = connection .storage_web3_dal() - .get_contract_code(address, block) - .map_err(|err| internal_error(endpoint_name, err))?; + .get_contract_code_unchecked(address, block_number) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - contract_code.map(|code| code.unwrap_or_default().into()) + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(contract_code.unwrap_or_default().into()) } #[tracing::instrument(skip(self))] @@ -360,34 +359,36 @@ impl EthNamespace { } 
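A minimal sketch of the `tokio::sync::RwLock` pattern these handlers migrate to, using a simplified registry in place of the real `Filters` type from `state.rs` (which keys filters by `U256`, stores `TypedFilter` values, and enforces the installed-filters limit):

use std::collections::HashMap;
use tokio::sync::RwLock;

// Simplified stand-in for the `Filters` registry; illustrative only.
struct FilterRegistry(RwLock<HashMap<u64, String>>);

impl FilterRegistry {
    async fn get_cloned(&self, idx: u64) -> Option<String> {
        // The read guard is a temporary dropped at the end of this statement,
        // so it is never held across an `.await` point and the future stays `Send`.
        self.0.read().await.get(&idx).cloned()
    }

    async fn add(&self, idx: u64, filter: String) {
        // Unlike `std::sync::RwLock`, acquiring the lock is an `await` rather
        // than a blocking call, so runtime worker threads are never parked.
        self.0.write().await.insert(idx, filter);
    }
}

The async lock also has no poisoning, which is why the `.unwrap()` calls on guard acquisition disappear throughout this hunk in favor of `.await`.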
#[tracing::instrument(skip(self))] - pub fn get_storage_at_impl( + pub async fn get_storage_at_impl( &self, address: Address, idx: U256, block: Option, ) -> Result { - let start = Instant::now(); - let endpoint_name = "get_storage_at"; + const METHOD_NAME: &str = "get_storage_at"; + let start = Instant::now(); let block = block.unwrap_or(BlockId::Number(BlockNumber::Pending)); - let value = self + let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); + let mut connection = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await; + let block_number = resolve_block(&mut connection, block, METHOD_NAME).await?; + let value = connection .storage_web3_dal() - .get_historical_value( - &StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)), - block, - ) - .map_err(|err| internal_error(endpoint_name, err))?; + .get_historical_value_unchecked(&storage_key, block_number) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - value + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); + Ok(value) } /// Account nonce. #[tracing::instrument(skip(self))] - pub fn get_transaction_count_impl( + pub async fn get_transaction_count_impl( &self, address: Address, block: Option, @@ -399,22 +400,26 @@ impl EthNamespace { BlockId::Number(BlockNumber::Pending) => "get_pending_transaction_count", _ => "get_historical_transaction_count", }; + let mut connection = self + .state + .connection_pool + .access_storage_tagged("api") + .await; let full_nonce = match block { - BlockId::Number(BlockNumber::Pending) => self - .state - .connection_pool - .access_storage_blocking() + BlockId::Number(BlockNumber::Pending) => connection .transactions_web3_dal() .next_nonce_by_initiator_account(address) + .await .map_err(|err| internal_error(method_name, err)), - _ => self - .state - .connection_pool - .access_storage_blocking() - .storage_web3_dal() - .get_address_historical_nonce(address, block) - .map_err(|err| internal_error(method_name, err))?, + _ => { + let block_number = resolve_block(&mut connection, block, method_name).await?; + connection + .storage_web3_dal() + .get_address_historical_nonce(address, block_number) + .await + .map_err(|err| internal_error(method_name, err)) + } }; let account_nonce = full_nonce.map(|nonce| decompose_full_nonce(nonce).0); @@ -424,31 +429,33 @@ impl EthNamespace { } #[tracing::instrument(skip(self))] - pub fn get_transaction_impl( + pub async fn get_transaction_impl( &self, id: TransactionId, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_transaction"; + const METHOD_NAME: &str = "get_transaction"; + let start = Instant::now(); let mut transaction = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_transaction(id, self.state.api_config.l2_chain_id) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); if let Some(proxy) = &self.state.tx_sender.0.proxy { // We're running an external node - check the proxy cache in // case the transaction was proxied but not yet synced back to us if let Ok(Some(tx)) = &transaction { // If the transaction is already in the db, remove it from cache - proxy.forget_tx(tx.hash) + proxy.forget_tx(tx.hash).await } else { if let TransactionId::Hash(hash) = id { // If the transaction is not in 
the db, check the cache - if let Some(tx) = proxy.find_tx(hash) { + if let Some(tx) = proxy.find_tx(hash).await { transaction = Ok(Some(tx.into())); } } @@ -456,30 +463,33 @@ impl EthNamespace { // If the transaction is not in the db or cache, query main node transaction = proxy .request_tx(id) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); } } } - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); transaction } #[tracing::instrument(skip(self))] - pub fn get_transaction_receipt_impl( + pub async fn get_transaction_receipt_impl( &self, hash: H256, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_transaction_receipt"; + const METHOD_NAME: &str = "get_transaction_receipt"; + let start = Instant::now(); let mut receipt = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_transaction_receipt(hash) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); if let Some(proxy) = &self.state.tx_sender.0.proxy { // We're running an external node @@ -487,13 +497,14 @@ impl EthNamespace { // If the transaction is not in the db, query main node. // Because it might be the case that it got rejected in state keeper // and won't be synced back to us, but we still want to return a receipt. - // We want to only forwared these kinds of receipts because otherwise + // We want to only forward these kinds of receipts because otherwise // clients will assume that the transaction they got the receipt for // was already processed on the EN (when it was not), // and will think that the state has already been updated on the EN (when it was not). 
                 if let Ok(Some(main_node_receipt)) = proxy
                     .request_tx_receipt(hash)
-                    .map_err(|err| internal_error(endpoint_name, err))
+                    .await
+                    .map_err(|err| internal_error(METHOD_NAME, err))
                 {
                     if main_node_receipt.status == Some(0.into())
                         && main_node_receipt.block_number.is_none()
@@ -505,36 +516,38 @@
             }
         }

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name);
+        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME);
         receipt
     }

     #[tracing::instrument(skip(self))]
-    pub fn new_block_filter_impl(&self) -> Result<U256, Web3Error> {
-        let start = Instant::now();
-        let endpoint_name = "new_block_filter";
+    pub async fn new_block_filter_impl(&self) -> Result<U256, Web3Error> {
+        const METHOD_NAME: &str = "new_block_filter";
+        let start = Instant::now();
         let last_block_number = self
             .state
             .connection_pool
-            .access_storage_blocking()
+            .access_storage_tagged("api")
+            .await
             .blocks_web3_dal()
             .get_sealed_miniblock_number()
-            .map_err(|err| internal_error(endpoint_name, err))?;
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?;

         let idx = self
             .state
             .installed_filters
             .write()
-            .unwrap()
+            .await
             .add(TypedFilter::Blocks(last_block_number));

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name);
+        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME);
         Ok(idx)
     }

     #[tracing::instrument(skip(self, filter))]
-    pub fn new_filter_impl(&self, filter: Filter) -> Result<U256, Web3Error> {
+    pub async fn new_filter_impl(&self, mut filter: Filter) -> Result<U256, Web3Error> {
         let start = Instant::now();

         if let Some(topics) = filter.topics.as_ref() {
@@ -542,12 +555,13 @@
                 return Err(Web3Error::TooManyTopics);
             }
         }
-        let from_block = self.state.get_filter_from_block(&filter)?;
+        self.state.resolve_filter_block_hash(&mut filter).await?;
+        let from_block = self.state.get_filter_from_block(&filter).await?;
         let idx = self
             .state
             .installed_filters
             .write()
-            .unwrap()
+            .await
             .add(TypedFilter::Events(filter, from_block));

         metrics::histogram!("api.web3.call", start.elapsed(), "method" => "new_filter");
@@ -555,50 +569,47 @@
     }

     #[tracing::instrument(skip(self))]
-    pub fn new_pending_transaction_filter_impl(&self) -> U256 {
+    pub async fn new_pending_transaction_filter_impl(&self) -> U256 {
         let start = Instant::now();

-        let idx =
-            self.state
-                .installed_filters
-                .write()
-                .unwrap()
-                .add(TypedFilter::PendingTransactions(
-                    chrono::Utc::now().naive_utc(),
-                ));
+        let idx = self
+            .state
+            .installed_filters
+            .write()
+            .await
+            .add(TypedFilter::PendingTransactions(
+                chrono::Utc::now().naive_utc(),
+            ));

         metrics::histogram!("api.web3.call", start.elapsed(), "method" => "new_pending_transaction_filter");
         idx
     }

     #[tracing::instrument(skip(self))]
-    pub fn get_filter_changes_impl(&self, idx: U256) -> Result<FilterChanges, Web3Error> {
+    pub async fn get_filter_changes_impl(&self, idx: U256) -> Result<FilterChanges, Web3Error> {
         let start = Instant::now();

-        let filter = match self
+        let filter = self
             .state
             .installed_filters
             .read()
-            .unwrap()
+            .await
             .get(idx)
             .cloned()
-        {
-            Some(filter) => filter,
-            None => return Err(Web3Error::FilterNotFound),
-        };
+            .ok_or(Web3Error::FilterNotFound)?;

-        let result = match self.filter_changes(filter) {
+        let result = match self.filter_changes(filter).await {
             Ok((changes, updated_filter)) => {
                 self.state
                     .installed_filters
                     .write()
-                    .unwrap()
+                    .await
                     .update(idx, updated_filter);
                 Ok(changes)
             }
             Err(Web3Error::LogsLimitExceeded(_, _, _)) => {
                 // The filter was not being polled for a long time, so we remove it.
- self.state.installed_filters.write().unwrap().remove(idx); + self.state.installed_filters.write().await.remove(idx); Err(Web3Error::FilterNotFound) } Err(err) => Err(err), @@ -609,10 +620,10 @@ impl EthNamespace { } #[tracing::instrument(skip(self))] - pub fn uninstall_filter_impl(&self, idx: U256) -> bool { + pub async fn uninstall_filter_impl(&self, idx: U256) -> bool { let start = Instant::now(); - let removed = self.state.installed_filters.write().unwrap().remove(idx); + let removed = self.state.installed_filters.write().await.remove(idx); metrics::histogram!("api.web3.call", start.elapsed(), "method" => "uninstall_filter"); removed @@ -624,29 +635,24 @@ impl EthNamespace { } #[tracing::instrument(skip(self, tx_bytes))] - pub fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { + pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { let start = Instant::now(); let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; tx.set_input(tx_bytes.0, hash); - let submit_res = match self.state.tx_sender.submit_tx(tx) { - Err(err) => { - vlog::debug!("Send raw transaction error {}", err); - metrics::counter!( - "api.submit_tx_error", - 1, - "reason" => err.grafana_error_code() - ); - Err(Web3Error::SubmitTransactionError( - err.to_string(), - err.data(), - )) - } - Ok(_) => Ok(hash), - }; + let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = submit_result.map(|_| hash).map_err(|err| { + vlog::debug!("Send raw transaction error: {err}"); + metrics::counter!( + "api.submit_tx_error", + 1, + "reason" => err.grafana_error_code() + ); + Web3Error::SubmitTransactionError(err.to_string(), err.data()) + }); metrics::histogram!("api.web3.call", start.elapsed(), "method" => "send_raw_transaction"); - submit_res + submit_result } #[tracing::instrument(skip(self))] @@ -656,7 +662,7 @@ impl EthNamespace { #[tracing::instrument(skip(self))] pub fn syncing_impl(&self) -> SyncState { - if let Some(state) = self.state.sync_state.as_ref() { + if let Some(state) = &self.state.sync_state { // Node supports syncing process (i.e. not the main node). 
if state.is_synced() { SyncState::NotSyncing @@ -674,21 +680,23 @@ impl EthNamespace { } #[tracing::instrument(skip(self, typed_filter))] - fn filter_changes( + async fn filter_changes( &self, typed_filter: TypedFilter, ) -> Result<(FilterChanges, TypedFilter), Web3Error> { - let method_name = "filter_changes"; + const METHOD_NAME: &str = "filter_changes"; let res = match typed_filter { TypedFilter::Blocks(from_block) => { let (block_hashes, last_block_number) = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_block_hashes_after(from_block, self.state.api_config.req_entities_limit) - .map_err(|err| internal_error(method_name, err))?; + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; ( FilterChanges::Hashes(block_hashes), TypedFilter::Blocks(last_block_number.unwrap_or(from_block)), @@ -698,13 +706,15 @@ impl EthNamespace { let (tx_hashes, last_timestamp) = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_pending_txs_hashes_after( from_timestamp, Some(self.state.api_config.req_entities_limit), ) - .map_err(|err| internal_error(method_name, err))?; + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; ( FilterChanges::Hashes(tx_hashes), TypedFilter::PendingTransactions(last_timestamp.unwrap_or(from_timestamp)), @@ -736,30 +746,42 @@ impl EthNamespace { addresses, topics, }; + let to_block = self + .state + .resolve_filter_block_number(filter.to_block) + .await?; - let mut storage = self.state.connection_pool.access_storage_blocking(); + let mut storage = self + .state + .connection_pool + .access_storage_tagged("api") + .await; - // Check if there are more than `req_entities_limit` logs that satisfies filter. + // Check if there is more than one block in range and there are more than `req_entities_limit` logs that satisfies filter. // In this case we should return error and suggest requesting logs with smaller block range. - if let Some(miniblock_number) = storage - .events_web3_dal() - .get_log_block_number( - get_logs_filter.clone(), - self.state.api_config.req_entities_limit, - ) - .map_err(|err| internal_error(method_name, err))? - { - return Err(Web3Error::LogsLimitExceeded( - self.state.api_config.req_entities_limit, - from_block.0, - miniblock_number.0 - 1, - )); + if from_block != to_block { + if let Some(miniblock_number) = storage + .events_web3_dal() + .get_log_block_number( + get_logs_filter.clone(), + self.state.api_config.req_entities_limit, + ) + .await + .map_err(|err| internal_error(METHOD_NAME, err))? + { + return Err(Web3Error::LogsLimitExceeded( + self.state.api_config.req_entities_limit, + from_block.0, + miniblock_number.0 - 1, + )); + } } let logs = storage .events_web3_dal() - .get_logs(get_logs_filter, self.state.api_config.req_entities_limit) - .map_err(|err| internal_error(method_name, err))?; + .get_logs(get_logs_filter, i32::MAX as usize) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; let new_from_block = logs .last() .map(|log| MiniblockNumber(log.block_number.unwrap().as_u32())) @@ -784,7 +806,8 @@ impl EthNamespace { } else { self.state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .next_nonce_by_initiator_account(transaction_request.from) .map_err(|err| internal_error("send_transaction", err))? 
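The events branch of `filter_changes` above also changes how a stored filter advances between polls: after returning the logs in the resolved block window, the filter's lower bound moves to the block of the last returned log (or stays put when nothing matched). A simplified model of that bookkeeping; the `Log` and `MiniblockNumber` definitions here are stand-ins, not the real zksync types:

#[derive(Clone, Copy, Debug, PartialEq)]
struct MiniblockNumber(u32);

struct Log {
    block_number: MiniblockNumber,
}

// Mirrors the `new_from_block` computation in the hunk above: advance to the
// last block that produced a log, or keep the current lower bound otherwise.
fn advance_from_block(current: MiniblockNumber, logs: &[Log]) -> MiniblockNumber {
    logs.last().map(|log| log.block_number).unwrap_or(current)
}

fn main() {
    let logs = vec![
        Log { block_number: MiniblockNumber(5) },
        Log { block_number: MiniblockNumber(7) },
    ];
    assert_eq!(advance_from_block(MiniblockNumber(3), &logs), MiniblockNumber(7));
    assert_eq!(advance_from_block(MiniblockNumber(3), &[]), MiniblockNumber(3));
}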
@@ -818,12 +841,10 @@ impl EthNamespace { { let chain_id = self.state.api_config.l2_chain_id; let domain = Eip712Domain::new(chain_id); - let signature = crate::block_on(async { - signer - .sign_typed_data(&domain, &transaction_request) - .await - .map_err(|err| internal_error("send_transaction", err)) - })?; + let signature = signer + .sign_typed_data(&domain, &transaction_request) + .await + .map_err(|err| internal_error("send_transaction", err))?; let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id); Bytes(encoded_tx) diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs index 9601cd889254..4cade2ff1953 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs @@ -1,24 +1,54 @@ use std::collections::HashMap; -use std::sync::{Arc, RwLock}; -use zksync_types::web3::types::H128; -use zksync_web3_decl::types::{PubSubFilter, PubSubResult}; +use std::sync::Arc; use jsonrpc_core::error::{Error, ErrorCode}; use jsonrpc_pubsub::typed; use jsonrpc_pubsub::SubscriptionId; +use tokio::sync::RwLock; + +use zksync_types::web3::types::H128; +use zksync_web3_decl::types::{PubSubFilter, PubSubResult}; use super::eth::EVENT_TOPIC_NUMBER_LIMIT; pub type SubscriptionMap = Arc>>; -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Copy)] +enum SubscriptionType { + Blocks, + Txs, + Logs, +} + +impl SubscriptionType { + fn as_str(&self) -> &'static str { + match self { + Self::Blocks => "blocks", + Self::Txs => "txs", + Self::Logs => "logs", + } + } +} + +#[derive(Debug, Clone)] pub struct EthSubscribe { + // `jsonrpc` backend executes task subscription on a separate thread that has no tokio context. 
+    pub runtime_handle: tokio::runtime::Handle,
     pub active_block_subs: SubscriptionMap<typed::Sink<PubSubResult>>,
     pub active_tx_subs: SubscriptionMap<typed::Sink<PubSubResult>>,
     pub active_log_subs: SubscriptionMap<(typed::Sink<PubSubResult>, PubSubFilter)>,
 }

 impl EthSubscribe {
+    pub fn new(runtime_handle: tokio::runtime::Handle) -> Self {
+        Self {
+            runtime_handle,
+            active_block_subs: SubscriptionMap::default(),
+            active_tx_subs: SubscriptionMap::default(),
+            active_log_subs: SubscriptionMap::default(),
+        }
+    }
+
     fn assign_id(
         subscriber: typed::Subscriber<PubSubResult>,
     ) -> (typed::Sink<PubSubResult>, SubscriptionId) {
@@ -39,23 +69,24 @@ impl EthSubscribe {
     }

     #[tracing::instrument(skip(self, subscriber, params))]
-    pub fn sub(
+    pub async fn sub(
         &self,
         subscriber: typed::Subscriber<PubSubResult>,
         sub_type: String,
         params: Option<serde_json::Value>,
     ) {
-        let mut block_subs = self.active_block_subs.write().unwrap();
-        let mut tx_subs = self.active_tx_subs.write().unwrap();
-        let mut log_subs = self.active_log_subs.write().unwrap();
-        match sub_type.as_str() {
+        let sub_type = match sub_type.as_str() {
             "newHeads" => {
+                let mut block_subs = self.active_block_subs.write().await;
                 let (sink, id) = Self::assign_id(subscriber);
                 block_subs.insert(id, sink);
+                Some(SubscriptionType::Blocks)
             }
             "newPendingTransactions" => {
+                let mut tx_subs = self.active_tx_subs.write().await;
                 let (sink, id) = Self::assign_id(subscriber);
                 tx_subs.insert(id, sink);
+                Some(SubscriptionType::Txs)
             }
             "logs" => {
                 let filter = params.map(serde_json::from_value).transpose();
@@ -70,42 +101,49 @@ impl EthSubscribe {
                             > EVENT_TOPIC_NUMBER_LIMIT
                         {
                             Self::reject(subscriber);
+                            None
                         } else {
+                            let mut log_subs = self.active_log_subs.write().await;
                             let (sink, id) = Self::assign_id(subscriber);
                             log_subs.insert(id, (sink, filter));
+                            Some(SubscriptionType::Logs)
                         }
                     }
-                    Err(_) => Self::reject(subscriber),
+                    Err(_) => {
+                        Self::reject(subscriber);
+                        None
+                    }
                 }
             }
             "syncing" => {
                 let (sink, _) = Self::assign_id(subscriber);
                 let _ = sink.notify(Ok(PubSubResult::Syncing(false)));
+                None
+            }
+            _ => {
+                Self::reject(subscriber);
+                None
             }
-            _ => Self::reject(subscriber),
         };

-        metrics::gauge!("api.web3.pubsub.active_subscribers", block_subs.len() as f64, "subscription_type" => "blocks");
-        metrics::gauge!("api.web3.pubsub.active_subscribers", tx_subs.len() as f64, "subscription_type" => "txs");
-        metrics::gauge!("api.web3.pubsub.active_subscribers", log_subs.len() as f64, "subscription_type" => "logs");
+        if let Some(sub_type) = sub_type {
+            metrics::increment_gauge!("api.web3.pubsub.active_subscribers", 1f64, "subscription_type" => sub_type.as_str());
+        }
     }

     #[tracing::instrument(skip(self))]
-    pub fn unsub(&self, id: SubscriptionId) -> Result<bool, Error> {
-        let removed = self
-            .active_block_subs
-            .write()
-            .unwrap()
-            .remove(&id)
-            .or_else(|| self.active_tx_subs.write().unwrap().remove(&id))
-            .or_else(|| {
-                self.active_log_subs
-                    .write()
-                    .unwrap()
-                    .remove(&id)
-                    .map(|(sink, _)| sink)
-            });
-        if removed.is_some() {
+    pub async fn unsub(&self, id: SubscriptionId) -> Result<bool, Error> {
+        let removed = if self.active_block_subs.write().await.remove(&id).is_some() {
+            Some(SubscriptionType::Blocks)
+        } else if self.active_tx_subs.write().await.remove(&id).is_some() {
+            Some(SubscriptionType::Txs)
+        } else if self.active_log_subs.write().await.remove(&id).is_some() {
+            Some(SubscriptionType::Logs)
+        } else {
+            None
+        };
+        if let Some(sub_type) = removed {
+            metrics::decrement_gauge!("api.web3.pubsub.active_subscribers", 1f64, "subscription_type" => sub_type.as_str());
             Ok(true)
         } else {
             Err(Error {
diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs 
b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs index d7f5482e1178..08223a7d4cdd 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/mod.rs @@ -1,25 +1,31 @@ //! Actual implementation of Web3 API namespaces logic, not tied to the backend //! used to create a JSON RPC server. -pub mod debug; -pub mod eth; -pub mod eth_subscribe; -pub mod net; -pub mod web3; -pub mod zks; - use num::{rational::Ratio, BigUint}; + use zksync_types::U256; use zksync_utils::{biguint_to_u256, u256_to_biguint}; +mod debug; +mod en; +mod eth; +mod eth_subscribe; +mod net; +mod web3; +mod zks; + pub use self::{ - debug::DebugNamespace, eth::EthNamespace, eth_subscribe::EthSubscribe, net::NetNamespace, - web3::Web3Namespace, zks::ZksNamespace, + debug::DebugNamespace, + en::EnNamespace, + eth::EthNamespace, + eth_subscribe::{EthSubscribe, SubscriptionMap}, + net::NetNamespace, + web3::Web3Namespace, + zks::ZksNamespace, }; pub fn scale_u256(val: U256, scale_factor: &Ratio) -> U256 { let val_as_ratio = &Ratio::from_integer(u256_to_biguint(val)); let result = (val_as_ratio * scale_factor).ceil(); - biguint_to_u256(result.to_integer()) } diff --git a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs index d68521ee3dbb..f6117c48dba0 100644 --- a/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/bin/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -3,20 +3,19 @@ use std::{collections::HashMap, convert::TryInto}; use bigdecimal::{BigDecimal, Zero}; -use zksync_mini_merkle_tree::mini_merkle_tree_proof; +use zksync_mini_merkle_tree::MiniMerkleTree; #[cfg(feature = "openzeppelin_tests")] use zksync_types::Bytes; use zksync_types::{ api::{BridgeAddresses, GetLogsFilter, L2ToL1LogProof, TransactionDetails, U64}, - commitment::CommitmentSerializable, + commitment::SerializeCommitment, explorer_api::{BlockDetails, L1BatchDetails}, fee::Fee, l1::L1Tx, l2_to_l1_log::L2ToL1Log, tokens::ETHEREUM_ADDRESS, transaction_request::{l2_tx_from_call_req, CallRequest}, - vm_trace::{ContractSourceDebugInfo, VmDebugTrace}, L1BatchNumber, MiniblockNumber, Transaction, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, }; @@ -31,23 +30,32 @@ use crate::fee_ticker::FeeTicker; use crate::fee_ticker::{error::TickerError, TokenPriceRequestType}; use crate::l1_gas_price::L1GasPriceProvider; -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ZksNamespace { pub state: RpcState, } +impl Clone for ZksNamespace { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + } + } +} + impl ZksNamespace { pub fn new(state: RpcState) -> Self { Self { state } } #[tracing::instrument(skip(self, request))] - pub fn estimate_fee_impl(&self, request: CallRequest) -> Result { + pub async fn estimate_fee_impl(&self, request: CallRequest) -> Result { let start = Instant::now(); let mut request_with_gas_per_pubdata_overridden = request; self.state - .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden)?; + .set_nonce_for_call_request(&mut request_with_gas_per_pubdata_overridden) + .await?; if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); @@ -63,14 +71,17 @@ impl ZksNamespace { tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = 
MAX_GAS_PER_PUBDATA_BYTE.into(); - let fee = self.estimate_fee(tx.into())?; + let fee = self.estimate_fee(tx.into()).await?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_fee"); Ok(fee) } #[tracing::instrument(skip(self, request))] - pub fn estimate_l1_to_l2_gas_impl(&self, request: CallRequest) -> Result { + pub async fn estimate_l1_to_l2_gas_impl( + &self, + request: CallRequest, + ) -> Result { let start = Instant::now(); let mut request_with_gas_per_pubdata_overridden = request; // When we're estimating fee, we are trying to deduce values related to fee, so we should @@ -85,13 +96,13 @@ impl ZksNamespace { .try_into() .map_err(Web3Error::SerializationError)?; - let fee = self.estimate_fee(tx.into())?; + let fee = self.estimate_fee(tx.into()).await?; metrics::histogram!("api.web3.call", start.elapsed(), "method" => "estimate_gas_l1_to_l2"); Ok(fee.gas_limit) } - fn estimate_fee(&self, tx: Transaction) -> Result { + async fn estimate_fee(&self, tx: Transaction) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; let acceptable_overestimation = self.state.api_config.estimate_gas_acceptable_overestimation; @@ -100,6 +111,7 @@ impl ZksNamespace { .state .tx_sender .get_txs_fee_in_wei(tx, scale_factor, acceptable_overestimation) + .await .map_err(|err| Web3Error::SubmitTransactionError(err.to_string(), err.data()))?; Ok(fee) @@ -126,17 +138,23 @@ impl ZksNamespace { } #[tracing::instrument(skip(self))] - pub fn get_confirmed_tokens_impl(&self, from: u32, limit: u8) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_confirmed_tokens"; + pub async fn get_confirmed_tokens_impl( + &self, + from: u32, + limit: u8, + ) -> Result, Web3Error> { + const METHOD_NAME: &str = "get_confirmed_tokens"; + let start = Instant::now(); let tokens = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .tokens_web3_dal() .get_well_known_tokens() - .map_err(|err| internal_error(endpoint_name, err))? + .await + .map_err(|err| internal_error(METHOD_NAME, err))? .into_iter() .skip(from as usize) .take(limit.into()) @@ -149,111 +167,58 @@ impl ZksNamespace { }) .collect(); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); Ok(tokens) } #[tracing::instrument(skip(self))] - pub fn get_token_price_impl(&self, l2_token: Address) -> Result { - let start = Instant::now(); - let endpoint_name = "get_token_price"; + pub async fn get_token_price_impl(&self, l2_token: Address) -> Result { + const METHOD_NAME: &str = "get_token_price"; + let start = Instant::now(); let token_price_result = { - let mut storage = self.state.connection_pool.access_storage_blocking(); + let mut storage = self + .state + .connection_pool + .access_storage_tagged("api") + .await; let mut tokens_web3_dal = storage.tokens_web3_dal(); FeeTicker::get_l2_token_price( &mut tokens_web3_dal, TokenPriceRequestType::USDForOneToken, &l2_token, ) + .await }; let result = match token_price_result { Ok(price) => Ok(price), Err(TickerError::PriceNotTracked(_)) => Ok(BigDecimal::zero()), - Err(err) => Err(internal_error(endpoint_name, err)), + Err(err) => Err(internal_error(METHOD_NAME, err)), }; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - result - } - - // This method is currently to be used for internal debug purposes only. 
- // It should be reworked for being public (validate contract info and maybe store it elsewhere). - #[tracing::instrument(skip(self, info))] - pub fn set_contract_debug_info_impl( - &self, - address: Address, - info: ContractSourceDebugInfo, - ) -> bool { - let start = Instant::now(); - - self.state - .connection_pool - .access_storage_blocking() - .storage_dal() - .set_contract_source(address, info); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "set_contract_debug_info"); - true - } - - #[tracing::instrument(skip(self))] - pub fn get_contract_debug_info_impl( - &self, - address: Address, - ) -> Option { - let start = Instant::now(); - - let info = self - .state - .connection_pool - .access_storage_blocking() - .storage_dal() - .get_contract_source(address); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "get_contract_debug_info"); - info - } - - #[tracing::instrument(skip(self))] - pub fn get_transaction_trace_impl(&self, hash: H256) -> Option { - let start = Instant::now(); - let mut storage = self.state.connection_pool.access_storage_blocking(); - let trace = storage.transactions_dal().get_trace(hash); - let result = trace.map(|trace| { - let mut storage_dal = storage.storage_dal(); - let mut sources = HashMap::new(); - for address in trace.contracts { - let source = storage_dal.get_contract_source(address); - sources.insert(address, source); - } - VmDebugTrace { - steps: trace.steps, - sources, - } - }); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => "get_transaction_trace"); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); result } #[tracing::instrument(skip(self))] - pub fn get_all_account_balances_impl( + pub async fn get_all_account_balances_impl( &self, address: Address, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_all_balances"; + const METHOD_NAME: &str = "get_all_balances"; + let start = Instant::now(); let balances = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .accounts_dal() .get_balances_for_address(address) - .map_err(|err| internal_error(endpoint_name, err))? + .await + .map_err(|err| internal_error(METHOD_NAME, err))? .into_iter() .map(|(address, balance_item)| { if address == L2_ETH_TOKEN_ADDRESS { @@ -264,26 +229,31 @@ impl ZksNamespace { }) .collect(); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); Ok(balances) } #[tracing::instrument(skip(self))] - pub fn get_l2_to_l1_msg_proof_impl( + pub async fn get_l2_to_l1_msg_proof_impl( &self, block_number: MiniblockNumber, sender: Address, msg: H256, l2_log_position: Option, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_l2_to_l1_msg_proof"; + const METHOD_NAME: &str = "get_l2_to_l1_msg_proof"; - let mut storage = self.state.connection_pool.access_storage_blocking(); + let start = Instant::now(); + let mut storage = self + .state + .connection_pool + .access_storage_tagged("api") + .await; let l1_batch_number = match storage .blocks_web3_dal() .get_l1_batch_number_of_miniblock(block_number) - .map_err(|err| internal_error(endpoint_name, err))? + .await + .map_err(|err| internal_error(METHOD_NAME, err))? 
         {
             Some(number) => number,
             None => return Ok(None),
@@ -291,13 +261,15 @@
         let (first_miniblock_of_l1_batch, _) = storage
             .blocks_web3_dal()
             .get_miniblock_range_of_l1_batch(l1_batch_number)
-            .map_err(|err| internal_error(endpoint_name, err))?
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?
             .expect("L1 batch should contain at least one miniblock");

         let all_l1_logs_in_block = storage
             .blocks_web3_dal()
             .get_l2_to_l1_logs(l1_batch_number)
-            .map_err(|err| internal_error(endpoint_name, err))?;
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?;

         // Position of l1 log in block relative to logs with identical data
         let l1_log_relative_position = if let Some(l2_log_position) = l2_log_position {
@@ -312,7 +284,8 @@
                     },
                     self.state.api_config.req_entities_limit,
                 )
-                .map_err(|err| internal_error(endpoint_name, err))?
+                .await
+                .map_err(|err| internal_error(METHOD_NAME, err))?
                 .iter()
                 .position(|event| {
                     event.block_number == Some(block_number.0.into())
@@ -343,43 +316,38 @@
                 return Ok(None);
             }
         };
-        let values: Vec<Vec<u8>> = all_l1_logs_in_block
-            .into_iter()
-            .map(|a| a.to_bytes())
-            .collect();
-        let mut proof: Vec<H256> = mini_merkle_tree_proof(
-            values,
-            l1_log_index,
-            L2ToL1Log::SERIALIZED_SIZE,
-            L2ToL1Log::limit_per_block(),
-        )
-        .into_iter()
-        .map(|elem| H256::from_slice(&elem))
-        .collect();
-        let root = proof.pop().unwrap();
+
+        let merkle_tree_leaves = all_l1_logs_in_block.iter().map(L2ToL1Log::to_bytes);
+        let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_BLOCK)
+            .merkle_root_and_path(l1_log_index);
         let msg_proof = L2ToL1LogProof {
             proof,
             root,
             id: l1_log_index as u32,
         };

-        metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name);
+        metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME);
         Ok(Some(msg_proof))
     }

     #[tracing::instrument(skip(self))]
-    pub fn get_l2_to_l1_log_proof_impl(
+    pub async fn get_l2_to_l1_log_proof_impl(
         &self,
         tx_hash: H256,
         index: Option<usize>,
     ) -> Result<Option<L2ToL1LogProof>, Web3Error> {
-        let start = Instant::now();
-        let endpoint_name = "get_l2_to_l1_msg_proof";
+        const METHOD_NAME: &str = "get_l2_to_l1_msg_proof";

-        let mut storage = self.state.connection_pool.access_storage_blocking();
+        let start = Instant::now();
+        let mut storage = self
+            .state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await;
         let (l1_batch_number, l1_batch_tx_index) = match storage
             .blocks_web3_dal()
             .get_l1_batch_info_for_tx(tx_hash)
-            .map_err(|err| internal_error(endpoint_name, err))?
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?
{ Some(x) => x, None => return Ok(None), @@ -388,7 +356,8 @@ impl ZksNamespace { let all_l1_logs_in_block = storage .blocks_web3_dal() .get_l2_to_l1_logs(l1_batch_number) - .map_err(|err| internal_error(endpoint_name, err))?; + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; let l1_log_index = match all_l1_logs_in_block .iter() @@ -402,131 +371,127 @@ impl ZksNamespace { } }; - let values: Vec> = all_l1_logs_in_block - .into_iter() - .map(|a| a.to_bytes()) - .collect(); - let mut proof: Vec = mini_merkle_tree_proof( - values, - l1_log_index, - L2ToL1Log::SERIALIZED_SIZE, - L2ToL1Log::limit_per_block(), - ) - .into_iter() - .map(|elem| H256::from_slice(&elem)) - .collect(); - let root = proof.pop().unwrap(); - + let merkle_tree_leaves = all_l1_logs_in_block.iter().map(L2ToL1Log::to_bytes); + let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, L2ToL1Log::LIMIT_PER_BLOCK) + .merkle_root_and_path(l1_log_index); let msg_proof = L2ToL1LogProof { proof, root, id: l1_log_index as u32, }; - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); Ok(Some(msg_proof)) } #[tracing::instrument(skip(self))] - pub fn get_l1_batch_number_impl(&self) -> Result { - let start = Instant::now(); - let endpoint_name = "get_l1_batch_number"; + pub async fn get_l1_batch_number_impl(&self) -> Result { + const METHOD_NAME: &str = "get_l1_batch_number"; + let start = Instant::now(); let l1_batch_number = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_sealed_l1_batch_number() + .await .map(|n| U64::from(n.0)) - .map_err(|err| internal_error(endpoint_name, err)); + .map_err(|err| internal_error(METHOD_NAME, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "endpoint" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "endpoint" => METHOD_NAME); l1_batch_number } #[tracing::instrument(skip(self))] - pub fn get_miniblock_range_impl( + pub async fn get_miniblock_range_impl( &self, batch: L1BatchNumber, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_miniblock_range"; + const METHOD_NAME: &str = "get_miniblock_range"; + let start = Instant::now(); let minmax = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_miniblock_range_of_l1_batch(batch) + .await .map(|minmax| minmax.map(|(min, max)| (U64::from(min.0), U64::from(max.0)))) - .map_err(|err| internal_error(endpoint_name, err)); + .map_err(|err| internal_error(METHOD_NAME, err)); - metrics::histogram!("api.web3.call", start.elapsed(), "endpoint" => endpoint_name); + metrics::histogram!("api.web3.call", start.elapsed(), "endpoint" => METHOD_NAME); minmax } #[tracing::instrument(skip(self))] - pub fn get_block_details_impl( + pub async fn get_block_details_impl( &self, block_number: MiniblockNumber, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_block_details"; + const METHOD_NAME: &str = "get_block_details"; + let start = Instant::now(); let block_details = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .blocks_dal() .get_block_details( block_number, self.state.tx_sender.0.sender_config.fee_account_addr, ) - .map_err(|err| internal_error(endpoint_name, err)); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => 
endpoint_name); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); block_details } #[tracing::instrument(skip(self))] - pub fn get_raw_block_transactions_impl( + pub async fn get_raw_block_transactions_impl( &self, block_number: MiniblockNumber, - ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_raw_block_transactions"; + ) -> Result, Web3Error> { + const METHOD_NAME: &str = "get_raw_block_transactions"; + let start = Instant::now(); let transactions = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_raw_miniblock_transactions(block_number) - .map_err(|err| internal_error(endpoint_name, err)); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); transactions } #[tracing::instrument(skip(self))] - pub fn get_transaction_details_impl( + pub async fn get_transaction_details_impl( &self, hash: H256, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_transaction_details"; + const METHOD_NAME: &str = "get_transaction_details"; + let start = Instant::now(); let mut tx_details = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_transaction_details(hash) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); if let Some(proxy) = &self.state.tx_sender.0.proxy { // We're running an external node - we should query the main node directly @@ -535,59 +500,61 @@ impl ZksNamespace { // If the transaction is not in the db, query main node for details tx_details = proxy .request_tx_details(hash) - .map_err(|err| internal_error(endpoint_name, err)); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); } } - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); tx_details } #[tracing::instrument(skip(self))] - pub fn get_l1_batch_details_impl( + pub async fn get_l1_batch_details_impl( &self, batch_number: L1BatchNumber, ) -> Result, Web3Error> { - let start = Instant::now(); - let endpoint_name = "get_l1_batch"; + const METHOD_NAME: &str = "get_l1_batch"; + let start = Instant::now(); let l1_batch = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .explorer() .blocks_dal() .get_l1_batch_details(batch_number) - .map_err(|err| internal_error(endpoint_name, err)); - - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + .await + .map_err(|err| internal_error(METHOD_NAME, err)); + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); l1_batch } #[tracing::instrument(skip(self))] - pub fn get_bytecode_by_hash_impl(&self, hash: H256) -> Option> { - let start = Instant::now(); - let endpoint_name = "get_bytecode_by_hash"; + pub async fn get_bytecode_by_hash_impl(&self, hash: H256) -> Option> { + const METHOD_NAME: &str = "get_bytecode_by_hash"; + let start = Instant::now(); let bytecode = self .state .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .storage_dal() - .get_factory_dep(hash); - - 
metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); + .get_factory_dep(hash) + .await; + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); bytecode } #[tracing::instrument(skip(self))] pub fn get_l1_gas_price_impl(&self) -> U64 { - let start = Instant::now(); - let endpoint_name = "get_l1_gas_price"; + const METHOD_NAME: &str = "get_l1_gas_price"; + let start = Instant::now(); let gas_price = self .state .tx_sender @@ -595,8 +562,7 @@ impl ZksNamespace { .l1_gas_price_source .estimate_effective_gas_price(); - metrics::histogram!("api.web3.call", start.elapsed(), "method" => endpoint_name); - + metrics::histogram!("api.web3.call", start.elapsed(), "method" => METHOD_NAME); gas_price.into() } @@ -605,7 +571,6 @@ impl ZksNamespace { pub fn set_known_bytecode_impl(&self, bytecode: Bytes) -> bool { let mut lock = self.state.known_bytecodes.write().unwrap(); lock.insert(bytecode.0.clone()); - true } } diff --git a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs index 813301ef41d9..f0b7a29cf5e8 100644 --- a/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs +++ b/core/bin/zksync_core/src/api_server/web3/pubsub_notifier.rs @@ -6,7 +6,7 @@ use zksync_dal::ConnectionPool; use zksync_types::MiniblockNumber; use zksync_web3_decl::types::{PubSubFilter, PubSubResult}; -use super::namespaces::eth_subscribe::SubscriptionMap; +use super::namespaces::SubscriptionMap; pub async fn notify_blocks( subscribers: SubscriptionMap>, @@ -15,9 +15,11 @@ pub async fn notify_blocks( stop_receiver: watch::Receiver, ) { let mut last_block_number = connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_sealed_miniblock_number() + .await .unwrap(); let mut timer = interval(polling_interval); loop { @@ -30,18 +32,30 @@ pub async fn notify_blocks( let start = Instant::now(); let new_blocks = connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_block_headers_after(last_block_number) + .await .unwrap(); metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "blocks"); if !new_blocks.is_empty() { last_block_number = MiniblockNumber(new_blocks.last().unwrap().number.unwrap().as_u32()); + let start = Instant::now(); - for sink in subscribers.read().unwrap().values() { - for block in new_blocks.clone() { - let _ = sink.notify(Ok(PubSubResult::Header(block))); + let subscribers = subscribers + .read() + .await + .values() + .cloned() + .collect::>(); + for sink in subscribers { + for block in new_blocks.iter().cloned() { + if sink.notify(Ok(PubSubResult::Header(block))).is_err() { + // Subscriber disconnected. 
+ break; + } metrics::counter!("api.web3.pubsub.notify", 1, "subscription_type" => "blocks"); } } @@ -68,17 +82,29 @@ pub async fn notify_txs( let start = Instant::now(); let (new_txs, new_last_time) = connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .transactions_web3_dal() .get_pending_txs_hashes_after(last_time, None) + .await .unwrap(); metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "txs"); if let Some(new_last_time) = new_last_time { last_time = new_last_time; let start = Instant::now(); - for sink in subscribers.read().unwrap().values() { - for tx_hash in new_txs.clone() { - let _ = sink.notify(Ok(PubSubResult::TxHash(tx_hash))); + + let subscribers = subscribers + .read() + .await + .values() + .cloned() + .collect::>(); + for sink in subscribers { + for tx_hash in new_txs.iter().cloned() { + if sink.notify(Ok(PubSubResult::TxHash(tx_hash))).is_err() { + // Subscriber disconnected. + break; + } metrics::counter!("api.web3.pubsub.notify", 1, "subscription_type" => "txs"); } } @@ -94,9 +120,11 @@ pub async fn notify_logs( stop_receiver: watch::Receiver, ) { let mut last_block_number = connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .get_sealed_miniblock_number() + .await .unwrap(); let mut timer = interval(polling_interval); loop { @@ -109,19 +137,32 @@ pub async fn notify_logs( let start = Instant::now(); let new_logs = connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .events_web3_dal() .get_all_logs(last_block_number) + .await .unwrap(); metrics::histogram!("api.web3.pubsub.db_poll_latency", start.elapsed(), "subscription_type" => "logs"); if !new_logs.is_empty() { last_block_number = MiniblockNumber(new_logs.last().unwrap().block_number.unwrap().as_u32()); let start = Instant::now(); - for (sink, filter) in subscribers.read().unwrap().values() { - for log in new_logs.clone() { + + let subscribers = subscribers + .read() + .await + .values() + .cloned() + .collect::>(); + + for (sink, filter) in subscribers { + for log in new_logs.iter().cloned() { if filter.matches(&log) { - let _ = sink.notify(Ok(PubSubResult::Log(log))); + if sink.notify(Ok(PubSubResult::Log(log))).is_err() { + // Subscriber disconnected. 
+                            break;
+                        }
                         metrics::counter!("api.web3.pubsub.notify", 1, "subscription_type" => "logs");
                     }
                 }
diff --git a/core/bin/zksync_core/src/api_server/web3/state.rs b/core/bin/zksync_core/src/api_server/web3/state.rs
index 64bf8bbc7022..c2c3003e5d51 100644
--- a/core/bin/zksync_core/src/api_server/web3/state.rs
+++ b/core/bin/zksync_core/src/api_server/web3/state.rs
@@ -4,20 +4,19 @@ use std::collections::HashSet;
 use std::convert::TryInto;
 use std::sync::Arc;
-use std::sync::RwLock;
+use tokio::sync::RwLock;

+use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig};
 use crate::api_server::tx_sender::TxSender;
-use crate::api_server::web3::backend_jsonrpc::error::internal_error;
+use crate::api_server::web3::{backend_jsonrpc::error::internal_error, resolve_block};
 use crate::sync_layer::SyncState;

-use zksync_config::ZkSyncConfig;
 use zksync_dal::ConnectionPool;
 use zksync_eth_signer::PrivateKeySigner;
+
 use zksync_types::{
-    api::{self, BlockId, BlockNumber, BridgeAddresses, TransactionRequest},
-    l2::L2Tx,
-    transaction_request::CallRequest,
-    Address, L1ChainId, L2ChainId, MiniblockNumber, H256, U256, U64,
+    api, l2::L2Tx, transaction_request::CallRequest, Address, L1ChainId, L2ChainId,
+    MiniblockNumber, H256, U256, U64,
 };
 use zksync_web3_decl::{
     error::Web3Error,
@@ -35,30 +34,34 @@ pub struct InternalApiConfig {
     pub max_tx_size: usize,
     pub estimate_gas_scale_factor: f64,
     pub estimate_gas_acceptable_overestimation: u32,
-    pub bridge_addresses: BridgeAddresses,
+    pub bridge_addresses: api::BridgeAddresses,
     pub diamond_proxy_addr: Address,
     pub l2_testnet_paymaster_addr: Option<Address>,
     pub req_entities_limit: usize,
 }

-impl From<ZkSyncConfig> for InternalApiConfig {
-    fn from(config: ZkSyncConfig) -> Self {
+impl InternalApiConfig {
+    pub fn new(
+        eth_config: &NetworkConfig,
+        web3_config: &Web3JsonRpcConfig,
+        contracts_config: &ContractsConfig,
+    ) -> Self {
         Self {
-            l1_chain_id: config.chain.eth.network.chain_id(),
-            l2_chain_id: L2ChainId(config.chain.eth.zksync_network_id),
-            max_tx_size: config.api.web3_json_rpc.max_tx_size,
-            estimate_gas_scale_factor: config.api.web3_json_rpc.estimate_gas_scale_factor,
-            estimate_gas_acceptable_overestimation: config
-                .api
-                .web3_json_rpc
+            l1_chain_id: eth_config.network.chain_id(),
+            l2_chain_id: L2ChainId(eth_config.zksync_network_id),
+            max_tx_size: web3_config.max_tx_size,
+            estimate_gas_scale_factor: web3_config.estimate_gas_scale_factor,
+            estimate_gas_acceptable_overestimation: web3_config
                 .estimate_gas_acceptable_overestimation,
-            bridge_addresses: BridgeAddresses {
-                l1_erc20_default_bridge: config.contracts.l1_erc20_bridge_proxy_addr,
-                l2_erc20_default_bridge: config.contracts.l2_erc20_bridge_addr,
+            bridge_addresses: api::BridgeAddresses {
+                l1_erc20_default_bridge: contracts_config.l1_erc20_bridge_proxy_addr,
+                l2_erc20_default_bridge: contracts_config.l2_erc20_bridge_addr,
+                l1_weth_bridge: contracts_config.l1_weth_bridge_proxy_addr,
+                l2_weth_bridge: contracts_config.l2_weth_bridge_addr,
             },
-            diamond_proxy_addr: config.contracts.diamond_proxy_addr,
-            l2_testnet_paymaster_addr: config.contracts.l2_testnet_paymaster_addr,
-            req_entities_limit: config.api.web3_json_rpc.req_entities_limit(),
+            diamond_proxy_addr: contracts_config.diamond_proxy_addr,
+            l2_testnet_paymaster_addr: contracts_config.l2_testnet_paymaster_addr,
+            req_entities_limit: web3_config.req_entities_limit(),
         }
     }
 }
@@ -98,7 +101,7 @@ impl RpcState {
     pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> {
         let chain_id = self.api_config.l2_chain_id;
         let (tx_request, hash) =
-            TransactionRequest::from_bytes(bytes, chain_id.0, self.api_config.max_tx_size)?;
+            api::TransactionRequest::from_bytes(bytes, chain_id.0, self.api_config.max_tx_size)?;

         Ok((tx_request.try_into()?, hash))
     }
@@ -111,50 +114,77 @@
         }
     }

-    pub fn resolve_filter_block_number(
+    pub async fn resolve_filter_block_number(
         &self,
         block_number: Option<api::BlockNumber>,
     ) -> Result<MiniblockNumber, Web3Error> {
-        let method_name = "resolve_filter_block_number";
-        let block_number = match block_number {
-            None => self
-                .connection_pool
-                .access_storage_blocking()
-                .blocks_web3_dal()
-                .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest))
-                .map_err(|err| internal_error(method_name, err))?
-                .unwrap(),
-            Some(api::BlockNumber::Number(number)) => Self::u64_to_block_number(number),
-            Some(block_number) => self
-                .connection_pool
-                .access_storage_blocking()
-                .blocks_web3_dal()
-                .resolve_block_id(api::BlockId::Number(block_number))
-                .map_err(|err| internal_error(method_name, err))?
-                .unwrap(),
-        };
-        Ok(block_number)
+        const METHOD_NAME: &str = "resolve_filter_block_number";
+
+        if let Some(api::BlockNumber::Number(number)) = block_number {
+            return Ok(Self::u64_to_block_number(number));
+        }
+
+        let block_number = block_number.unwrap_or(api::BlockNumber::Latest);
+        let block_id = api::BlockId::Number(block_number);
+        let mut conn = self.connection_pool.access_storage_tagged("api").await;
+        Ok(conn
+            .blocks_web3_dal()
+            .resolve_block_id(block_id)
+            .await
+            .map_err(|err| internal_error(METHOD_NAME, err))?
+ .unwrap()) + // ^ `unwrap()` is safe: `resolve_block_id(api::BlockId::Number(_))` can only return `None` + // if called with an explicit number, and we've handled this case earlier. } - pub fn resolve_filter_block_range( + pub async fn resolve_filter_block_range( &self, filter: &Filter, ) -> Result<(MiniblockNumber, MiniblockNumber), Web3Error> { - let from_block = self.resolve_filter_block_number(filter.from_block)?; - let to_block = self.resolve_filter_block_number(filter.to_block)?; + let from_block = self.resolve_filter_block_number(filter.from_block).await?; + let to_block = self.resolve_filter_block_number(filter.to_block).await?; Ok((from_block, to_block)) } + /// If filter has `block_hash` then it resolves block number by hash and sets it to `from_block` and `to_block`. + pub async fn resolve_filter_block_hash(&self, filter: &mut Filter) -> Result<(), Web3Error> { + match (filter.block_hash, filter.from_block, filter.to_block) { + (Some(block_hash), None, None) => { + let block_number = self + .connection_pool + .access_storage_tagged("api") + .await + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Hash(block_hash)) + .await + .map_err(|err| internal_error("resolve_filter_block_hash", err))? + .ok_or(Web3Error::NoBlock)?; + + filter.from_block = Some(api::BlockNumber::Number(block_number.0.into())); + filter.to_block = Some(api::BlockNumber::Number(block_number.0.into())); + Ok(()) + } + (Some(_), _, _) => Err(Web3Error::InvalidFilterBlockHash), + (None, _, _) => Ok(()), + } + } + /// Returns initial `from_block` for filter. /// It is equal to max(filter.from_block, PENDING_BLOCK). - pub fn get_filter_from_block(&self, filter: &Filter) -> Result { - let method_name = "get_filter_from_block"; + pub async fn get_filter_from_block( + &self, + filter: &Filter, + ) -> Result { + const METHOD_NAME: &str = "get_filter_from_block"; + let pending_block = self .connection_pool - .access_storage_blocking() + .access_storage_tagged("api") + .await .blocks_web3_dal() .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending)) - .map_err(|err| internal_error(method_name, err))? + .await + .map_err(|err| internal_error(METHOD_NAME, err))? 
.expect("Pending block number shouldn't be None"); let block_number = match filter.from_block { Some(api::BlockNumber::Number(number)) => { @@ -166,24 +196,23 @@ impl RpcState { Ok(block_number) } - pub(crate) fn set_nonce_for_call_request( + pub(crate) async fn set_nonce_for_call_request( &self, call_request: &mut CallRequest, ) -> Result<(), Web3Error> { - let method_name = "set_nonce_for_call_request"; + const METHOD_NAME: &str = "set_nonce_for_call_request"; + if call_request.nonce.is_none() { let from = call_request.from.unwrap_or_default(); - let address_historical_nonce = self - .connection_pool - .access_storage_blocking() + let block_id = api::BlockId::Number(api::BlockNumber::Latest); + let mut connection = self.connection_pool.access_storage_tagged("api").await; + let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?; + let address_historical_nonce = connection .storage_web3_dal() - .get_address_historical_nonce(from, BlockId::Number(BlockNumber::Latest)); - - call_request.nonce = Some( - address_historical_nonce - .unwrap() - .map_err(|result| internal_error(method_name, result.to_string()))?, - ); + .get_address_historical_nonce(from, block_number) + .await + .map_err(|err| internal_error(METHOD_NAME, err))?; + call_request.nonce = Some(address_historical_nonce); } Ok(()) } diff --git a/core/bin/zksync_core/src/bin/block_reverter.rs b/core/bin/zksync_core/src/bin/block_reverter.rs index fb10fcd985dd..95f527d17f4d 100644 --- a/core/bin/zksync_core/src/bin/block_reverter.rs +++ b/core/bin/zksync_core/src/bin/block_reverter.rs @@ -1,11 +1,13 @@ use clap::{Parser, Subcommand}; use tokio::io::{self, AsyncReadExt}; -use zksync_config::ZkSyncConfig; -use zksync_dal::ConnectionPool; +use zksync_config::{ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig}; +use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_types::{L1BatchNumber, U256}; -use zksync_core::block_reverter::{BlockReverter, BlockReverterFlags, L1ExecutedBatchesRevert}; +use zksync_core::block_reverter::{ + BlockReverter, BlockReverterEthConfig, BlockReverterFlags, L1ExecutedBatchesRevert, +}; #[derive(Debug, Parser)] #[command(author = "Matter Labs", version, about = "Block revert utility", long_about = None)] @@ -63,13 +65,23 @@ enum Command { #[tokio::main] async fn main() -> anyhow::Result<()> { - let _sentry_guard = vlog::init(); - let config = ZkSyncConfig::from_env(); + vlog::init(); + let _sentry_guard = vlog::init_sentry(); + let eth_sender = ETHSenderConfig::from_env(); + let db_config = DBConfig::from_env(); + let eth_client = ETHClientConfig::from_env(); let default_priority_fee_per_gas = - U256::from(config.eth_sender.gas_adjuster.default_priority_fee_per_gas); - let connection_pool = ConnectionPool::new(None, true); - let block_reverter = - BlockReverter::new(config, connection_pool, L1ExecutedBatchesRevert::Disallowed); + U256::from(eth_sender.gas_adjuster.default_priority_fee_per_gas); + let contracts = ContractsConfig::from_env(); + let config = BlockReverterEthConfig::new(eth_sender, contracts, eth_client.web3_url.clone()); + + let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; + let block_reverter = BlockReverter::new( + db_config, + Some(config), + connection_pool, + L1ExecutedBatchesRevert::Disallowed, + ); match Cli::parse().command { Command::Display { json } => { diff --git a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs index 
775eba3419e4..f81df1fbddb0 100644 --- a/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs +++ b/core/bin/zksync_core/src/bin/merkle_tree_consistency_checker.rs @@ -2,10 +2,8 @@ use clap::Parser; use std::{num::NonZeroU32, time::Instant}; -use zksync_config::ZkSyncConfig; -use zksync_merkle_tree::ZkSyncTree; -use zksync_merkle_tree2::domain::ZkSyncTree as NewTree; -use zksync_storage::db::Database; +use zksync_config::DBConfig; +use zksync_merkle_tree::domain::ZkSyncTree; use zksync_storage::RocksDB; #[derive(Debug, Parser)] @@ -16,78 +14,34 @@ use zksync_storage::RocksDB; long_about = None )] struct Cli { - /// Do not check the old tree implementation in full mode. By default, this is the only - /// tree checked. - #[arg(long = "no-full")] - no_full: bool, - /// Check the old tree implementation in lightweight mode. - #[arg(long = "lightweight")] - lightweight: bool, - /// Check the new tree implementation in lightweight mode. The optional argument - /// specifies the version of the tree to be checked, expressed as a non-zero number + /// Specifies the version of the tree to be checked, expressed as a non-zero number /// of blocks applied to it. By default, the latest tree version is checked. - #[arg(long = "lightweight-new", value_name = "BLOCKS")] - new_lightweight: Option>, + #[arg(long = "blocks")] + blocks: Option, } impl Cli { - fn run(self, config: &ZkSyncConfig) { - if !self.no_full { - let db_path = config.db.path(); - vlog::info!( - "Verifying consistency of old tree, full mode at {}", - db_path - ); - let start = Instant::now(); - let db = RocksDB::new(Database::MerkleTree, db_path, true); - let tree = ZkSyncTree::new(db); - tree.verify_consistency(); - vlog::info!("Old tree in full mode verified in {:?}", start.elapsed()); - } - - if self.lightweight { - let db_path = config.db.merkle_tree_fast_ssd_path(); - vlog::info!( - "Verifying consistency of old tree, lightweight mode at {}", - db_path - ); - let start = Instant::now(); - let db = RocksDB::new(Database::MerkleTree, db_path, true); - let tree = ZkSyncTree::new_lightweight(db); - tree.verify_consistency(); - vlog::info!( - "Old tree in lightweight mode verified in {:?}", - start.elapsed() - ); - } - - if let Some(maybe_block_number) = self.new_lightweight { - let db_path = &config.db.new_merkle_tree_ssd_path; - vlog::info!( - "Verifying consistency of new tree, lightweight mode at {}", - db_path - ); - let start = Instant::now(); - let db = RocksDB::new(Database::MerkleTree, db_path, true); - let tree = NewTree::new_lightweight(db); - - let block_number = maybe_block_number.or_else(|| NonZeroU32::new(tree.block_number())); - if let Some(block_number) = block_number { - vlog::info!("Block number to check: {}", block_number); - tree.verify_consistency(block_number); - vlog::info!( - "New tree in lightweight mode verified in {:?}", - start.elapsed() - ); - } else { - vlog::info!("The tree is empty, skipping"); - } + fn run(self, config: &DBConfig) { + let db_path = &config.new_merkle_tree_ssd_path; + vlog::info!("Verifying consistency of Merkle tree at {db_path}"); + let start = Instant::now(); + let db = RocksDB::new(db_path, true); + let tree = ZkSyncTree::new_lightweight(db); + + let block_number = self.blocks.or_else(|| NonZeroU32::new(tree.block_number())); + if let Some(block_number) = block_number { + vlog::info!("Block number to check: {block_number}"); + tree.verify_consistency(block_number); + vlog::info!("Merkle tree verified in {:?}", start.elapsed()); + } else { + vlog::info!("Merkle tree is empty, 
skipping"); } } } fn main() { - let _sentry_guard = vlog::init(); - let config = ZkSyncConfig::from_env(); - Cli::parse().run(&config); + vlog::init(); + let _sentry_guard = vlog::init_sentry(); + let db_config = DBConfig::from_env(); + Cli::parse().run(&db_config); } diff --git a/core/bin/zksync_core/src/bin/rocksdb_util.rs b/core/bin/zksync_core/src/bin/rocksdb_util.rs index 8085390ce902..a799934606e5 100644 --- a/core/bin/zksync_core/src/bin/rocksdb_util.rs +++ b/core/bin/zksync_core/src/bin/rocksdb_util.rs @@ -25,7 +25,8 @@ fn create_backup(config: &DBConfig) -> Result<(), Error> { &BackupEngineOptions::default(), config.merkle_tree_backup_path(), )?; - let db = DB::open_for_read_only(&Options::default(), config.path(), false)?; + let db_dir = &config.new_merkle_tree_ssd_path; + let db = DB::open_for_read_only(&Options::default(), db_dir, false)?; engine.create_new_backup(&db)?; engine.purge_old_backups(config.backup_count()) } @@ -35,14 +36,15 @@ fn restore_from_latest_backup(config: &DBConfig) -> Result<(), Error> { &BackupEngineOptions::default(), config.merkle_tree_backup_path(), )?; - engine.restore_from_latest_backup(config.path(), config.path(), &RestoreOptions::default()) + let db_dir = &config.new_merkle_tree_ssd_path; + engine.restore_from_latest_backup(db_dir, db_dir, &RestoreOptions::default()) } fn main() { - let config = DBConfig::from_env(); + let db_config = DBConfig::from_env(); match Cli::parse().command { - Command::Backup => create_backup(&config).unwrap(), - Command::Restore => restore_from_latest_backup(&config).unwrap(), + Command::Backup => create_backup(&db_config).unwrap(), + Command::Restore => restore_from_latest_backup(&db_config).unwrap(), } } @@ -55,23 +57,24 @@ mod tests { fn backup_restore_workflow() { let backup_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); let temp_dir = TempDir::new().expect("failed to get temporary directory for RocksDB"); - let config = DBConfig { - path: temp_dir.path().to_str().unwrap().to_string(), + let db_config = DBConfig { + new_merkle_tree_ssd_path: temp_dir.path().to_str().unwrap().to_string(), merkle_tree_backup_path: backup_dir.path().to_str().unwrap().to_string(), ..Default::default() }; + let db_dir = &db_config.new_merkle_tree_ssd_path; let mut options = Options::default(); options.create_if_missing(true); - let db = DB::open(&options, temp_dir.as_ref()).unwrap(); + let db = DB::open(&options, db_dir).unwrap(); db.put(b"key", b"value").expect("failed to write to db"); - create_backup(&config).expect("failed to create backup"); - // drop original db + create_backup(&db_config).expect("failed to create backup"); + // Drop original database drop((db, temp_dir)); - restore_from_latest_backup(&config).expect("failed to restore from backup"); - let db = DB::open(&Options::default(), config.path()).unwrap(); + restore_from_latest_backup(&db_config).expect("failed to restore from backup"); + let db = DB::open(&Options::default(), db_dir).unwrap(); assert_eq!(db.get(b"key").unwrap().unwrap(), b"value"); } } diff --git a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs index d8125a961cf6..8184baec29fd 100644 --- a/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs +++ b/core/bin/zksync_core/src/bin/verified_sources_fetcher.rs @@ -1,14 +1,16 @@ use std::io::Write; -use zksync_dal::ConnectionPool; +use zksync_dal::{connection::DbVariant, ConnectionPool}; use zksync_types::explorer_api::SourceCodeData; -fn main() { - let pool = 
ConnectionPool::new(Some(1), false); - let mut storage = pool.access_storage_blocking(); +#[tokio::main] +async fn main() { + let pool = ConnectionPool::new(Some(1), DbVariant::Replica).await; + let mut storage = pool.access_storage().await; let reqs = storage .explorer() .contract_verification_dal() .get_all_successful_requests() + .await .unwrap(); std::fs::create_dir_all("./verified_sources").unwrap(); @@ -48,6 +50,16 @@ fn main() { file.write_all(content.as_bytes()).unwrap(); } } + SourceCodeData::VyperMultiFile(sources) => { + for (key, content) in sources { + let p = format!("{}/{}.vy", &dir, key); + let path = std::path::Path::new(p.as_str()); + let prefix = path.parent().unwrap(); + std::fs::create_dir_all(prefix).unwrap(); + let mut file = std::fs::File::create(path).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + } + } } } } diff --git a/core/bin/zksync_core/src/bin/zksync_server.rs b/core/bin/zksync_core/src/bin/zksync_server.rs index bfd0fee72fd8..f1c07360adeb 100644 --- a/core/bin/zksync_core/src/bin/zksync_server.rs +++ b/core/bin/zksync_core/src/bin/zksync_server.rs @@ -1,13 +1,15 @@ use clap::Parser; use std::{env, str::FromStr, time::Duration}; +use zksync_config::configs::chain::NetworkConfig; -use zksync_config::ZkSyncConfig; +use zksync_config::ETHSenderConfig; use zksync_core::{ - genesis_init, initialize_components, setup_sigint_handler, wait_for_tasks, Component, + genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, Component, Components, }; use zksync_storage::RocksDB; +use zksync_utils::wait_for_tasks::wait_for_tasks; #[derive(Debug, Parser)] #[structopt(author = "Matter Labs", version, about = "zkSync operator node", long_about = None)] @@ -21,7 +23,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,tree_lightweight,eth,data_fetcher,state_keeper,witness_generator,housekeeper" + default_value = "api,tree,eth,data_fetcher,state_keeper,witness_generator,housekeeper" )] components: ComponentsToRun, } @@ -45,12 +47,16 @@ impl FromStr for ComponentsToRun { #[tokio::main] async fn main() -> anyhow::Result<()> { let opt = Cli::parse(); - let mut config = ZkSyncConfig::from_env(); - let sentry_guard = vlog::init(); - - if opt.genesis { - genesis_init(config).await; - return Ok(()); + vlog::init(); + let sentry_guard = vlog::init_sentry(); + + if opt.genesis || is_genesis_needed().await { + let network = NetworkConfig::from_env(); + let eth_sender = ETHSenderConfig::from_env(); + genesis_init(&eth_sender, &network).await; + if opt.genesis { + return Ok(()); + } } if sentry_guard.is_some() { @@ -70,13 +76,6 @@ async fn main() -> anyhow::Result<()> { opt.components.0 }; - if cfg!(feature = "openzeppelin_tests") { - // Set very small block timeout for tests to work faster. - config.chain.state_keeper.block_commit_deadline_ms = 1; - } - - genesis_init(config.clone()).await; - // OneShotWitnessGenerator is the only component that is not expected to run indefinitely // if this value is `false`, we expect all components to run indefinitely: we panic if any component returns. let is_only_oneshot_witness_generator_task = matches!( @@ -85,16 +84,19 @@ async fn main() -> anyhow::Result<()> { ); // Run core actors.
- let (core_task_handles, stop_sender, cb_receiver) = - initialize_components(&config, components, is_only_oneshot_witness_generator_task) + let (core_task_handles, stop_sender, cb_receiver, health_check_handle) = + initialize_components(components, is_only_oneshot_witness_generator_task) .await .expect("Unable to start Core actors"); vlog::info!("Running {} core task handlers", core_task_handles.len()); let sigint_receiver = setup_sigint_handler(); + let particular_crypto_alerts = None::<Vec<String>>; + let graceful_shutdown = None::<futures::future::Ready<()>>; + let tasks_allowed_to_finish = is_only_oneshot_witness_generator_task; tokio::select! { - _ = wait_for_tasks(core_task_handles, is_only_oneshot_witness_generator_task) => {}, + _ = wait_for_tasks(core_task_handles, particular_crypto_alerts, graceful_shutdown, tasks_allowed_to_finish) => {}, _ = sigint_receiver => { vlog::info!("Stop signal received, shutting down"); }, @@ -108,5 +110,7 @@ async fn main() -> anyhow::Result<()> { RocksDB::await_rocksdb_termination(); // Sleep for some time to let some components gracefully stop. tokio::time::sleep(Duration::from_secs(5)).await; + health_check_handle.stop().await; + vlog::info!("Stopped"); Ok(()) } diff --git a/core/bin/zksync_core/src/block_reverter/mod.rs b/core/bin/zksync_core/src/block_reverter/mod.rs index c9a1bf5dfe69..df7fa8f1a622 100644 --- a/core/bin/zksync_core/src/block_reverter/mod.rs +++ b/core/bin/zksync_core/src/block_reverter/mod.rs @@ -5,19 +5,23 @@ use tokio::time::sleep; use std::path::Path; use std::time::Duration; -use zksync_config::ZkSyncConfig; +use zksync_config::{ContractsConfig, DBConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; use zksync_dal::ConnectionPool; -use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface, EthInterface}; -use zksync_merkle_tree::ZkSyncTree; -use zksync_merkle_tree2::domain::ZkSyncTree as NewTree; -use zksync_state::secondary_storage::SecondaryStateStorage; -use zksync_storage::db::Database; +use zksync_merkle_tree::domain::ZkSyncTree; +use zksync_state::RocksdbStorage; use zksync_storage::RocksDB; use zksync_types::aggregated_operations::AggregatedActionType; use zksync_types::ethabi::Token; -use zksync_types::web3::contract::Options; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_types::web3::{ + contract::{Contract, Options}, + transports::Http, + types::{BlockId, BlockNumber}, + Web3, +}; +use zksync_types::{L1BatchNumber, PackedEthSignature, H160, H256, U256}; + +use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; bitflags!
{ pub struct BlockReverterFlags: u32 { @@ -39,6 +43,36 @@ pub enum L1ExecutedBatchesRevert { Disallowed, } +#[derive(Debug)] +pub struct BlockReverterEthConfig { + eth_client_url: String, + reverter_private_key: H256, + reverter_address: H160, + diamond_proxy_addr: H160, + validator_timelock_addr: H160, + default_priority_fee_per_gas: u64, +} + +impl BlockReverterEthConfig { + pub fn new(eth_config: ETHSenderConfig, contract: ContractsConfig, web3_url: String) -> Self { + let pk = eth_config + .sender + .private_key() + .expect("Private key is required for block reversion"); + let operator_address = PackedEthSignature::address_from_private_key(&pk) + .expect("Failed to get address from private key"); + + Self { + eth_client_url: web3_url, + reverter_private_key: pk, + reverter_address: operator_address, + diamond_proxy_addr: contract.diamond_proxy_addr, + validator_timelock_addr: contract.validator_timelock_addr, + default_priority_fee_per_gas: eth_config.gas_adjuster.default_priority_fee_per_gas, + } + } +} + /// This struct is used to perform a rollback of the state. /// Rollback is a rare event of manual intervention, when the node operator /// decides to revert some of the not yet finalized batches for some reason @@ -54,19 +88,22 @@ pub enum L1ExecutedBatchesRevert { /// - State of the Ethereum contract (if the block was committed) #[derive(Debug)] pub struct BlockReverter { - config: ZkSyncConfig, + db_config: DBConfig, + eth_config: Option<BlockReverterEthConfig>, connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, } impl BlockReverter { pub fn new( - config: ZkSyncConfig, + db_config: DBConfig, + eth_config: Option<BlockReverterEthConfig>, connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, ) -> Self { Self { - config, + eth_config, + db_config, connection_pool, executed_batches_revert_mode, } } @@ -86,9 +123,12 @@ impl BlockReverter { self.executed_batches_revert_mode, L1ExecutedBatchesRevert::Disallowed ) { - let last_executed_l1_batch = self - .get_l1_batch_number_from_contract(AggregatedActionType::ExecuteBlocks) - .await; + let mut storage = self.connection_pool.access_storage().await; + let last_executed_l1_batch = storage + .blocks_dal() + .get_number_of_last_block_executed_on_eth() + .await + .expect("failed to get last executed L1 block"); assert!( last_l1_batch_to_keep >= last_executed_l1_batch, "Attempt to revert already executed blocks" @@ -99,7 +139,7 @@ impl BlockReverter { self.rollback_rocks_dbs(last_l1_batch_to_keep, rollback_tree, rollback_sk_cache) .await; if rollback_postgres { - self.rollback_postgres(last_l1_batch_to_keep); + self.rollback_postgres(last_l1_batch_to_keep).await; } } @@ -109,55 +149,18 @@ impl BlockReverter { rollback_tree: bool, rollback_sk_cache: bool, ) { - vlog::info!("getting logs that should be applied to rollback state..."); - let logs = self - .connection_pool - .access_storage_blocking() - .storage_logs_dal() - .get_storage_logs_for_revert(last_l1_batch_to_keep); - if rollback_tree { let storage_root_hash = self .connection_pool - .access_storage_blocking() + .access_storage() + .await .blocks_dal() - .get_merkle_state_root(last_l1_batch_to_keep) + .get_block_state_root(last_l1_batch_to_keep) + .await .expect("failed to fetch root hash for target block"); - // Convert H256 -> U256, note that tree keys are encoded using little endianness.
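// Editor's aside (not part of the original patch): a minimal illustration of
// the little-endian key encoding referenced in the comment above, assuming the
// `primitive-types` U256/H256 types used throughout this module.
let mut key_bytes = [0_u8; 32]; // e.g. the output of H256::to_fixed_bytes()
key_bytes[0] = 1; // byte 0 is the least significant byte under little-endian
assert_eq!(U256::from_little_endian(&key_bytes), U256::one());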
- let logs: Vec<_> = logs - .iter() - .map(|(key, value)| (U256::from_little_endian(&key.to_fixed_bytes()), *value)) - .collect(); - - // Rolling back both full tree and lightweight tree - let full_tree_path = self.config.db.path(); - if Path::new(full_tree_path).exists() { - vlog::info!("Rolling back full tree..."); - Self::rollback_tree( - last_l1_batch_to_keep, - logs.clone(), - full_tree_path, - storage_root_hash, - ); - } else { - vlog::info!("Full tree not found; skipping"); - } - - let lightweight_tree_path = self.config.db.merkle_tree_fast_ssd_path(); - if Path::new(lightweight_tree_path).exists() { - vlog::info!("Rolling back lightweight tree..."); - Self::rollback_tree( - last_l1_batch_to_keep, - logs, - lightweight_tree_path, - storage_root_hash, - ); - } else { - vlog::info!("Lightweight tree not found; skipping"); - } - - let new_lightweight_tree_path = &self.config.db.new_merkle_tree_ssd_path; + // Rolling back Merkle tree + let new_lightweight_tree_path = &self.db_config.new_merkle_tree_ssd_path; if Path::new(new_lightweight_tree_path).exists() { vlog::info!("Rolling back new lightweight tree..."); Self::rollback_new_tree( @@ -172,43 +175,21 @@ impl BlockReverter { if rollback_sk_cache { assert!( - Path::new(self.config.db.state_keeper_db_path()).exists(), + Path::new(self.db_config.state_keeper_db_path()).exists(), "Path with state keeper cache DB doesn't exist" ); - self.rollback_state_keeper_cache(last_l1_batch_to_keep, logs) + self.rollback_state_keeper_cache(last_l1_batch_to_keep) .await; } } - /// Reverts blocks in a Merkle tree. - fn rollback_tree( - last_l1_batch_to_keep: L1BatchNumber, - logs: Vec<(U256, Option<H256>)>, - path: impl AsRef<Path>, - storage_root_hash: H256, - ) { - let db = RocksDB::new(Database::MerkleTree, path, true); - let mut tree = ZkSyncTree::new(db); - - if tree.block_number() <= last_l1_batch_to_keep.0 { - vlog::info!("Tree is behind the block to revert to; skipping"); - return; - } - tree.revert_logs(last_l1_batch_to_keep, logs); - - vlog::info!("checking match of the tree root hash and root hash from Postgres..."); - assert_eq!(tree.root_hash(), storage_root_hash); - vlog::info!("saving tree changes to disk..."); - tree.save().expect("Unable to update tree state"); - } - fn rollback_new_tree( last_l1_batch_to_keep: L1BatchNumber, path: impl AsRef<Path>, storage_root_hash: H256, ) { - let db = RocksDB::new(Database::MerkleTree, path, true); - let mut tree = NewTree::new_lightweight(db); + let db = RocksDB::new(path, true); + let mut tree = ZkSyncTree::new_lightweight(db); if tree.block_number() <= last_l1_batch_to_keep.0 { vlog::info!("Tree is behind the block to revert to; skipping"); @@ -223,86 +204,79 @@ impl BlockReverter { } /// Reverts blocks in the state keeper cache.
- async fn rollback_state_keeper_cache( - &self, - last_l1_batch_to_keep: L1BatchNumber, - logs: Vec<(H256, Option<H256>)>, - ) { + async fn rollback_state_keeper_cache(&self, last_l1_batch_to_keep: L1BatchNumber) { vlog::info!("opening DB with state keeper cache..."); - let db = RocksDB::new( - Database::StateKeeper, - self.config.db.state_keeper_db_path(), - true, - ); - let mut sk_cache = SecondaryStateStorage::new(db); - - if sk_cache.get_l1_batch_number() > last_l1_batch_to_keep + 1 { - vlog::info!("getting contracts and factory deps that should be removed..."); - let mut storage = self.connection_pool.access_storage_blocking(); - let (_, last_miniblock_to_keep) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) - .expect("L1 batch should contain at least one miniblock"); - let factory_deps = storage - .storage_dal() - .get_factory_deps_for_revert(last_miniblock_to_keep); + let path = self.db_config.state_keeper_db_path().as_ref(); + let mut sk_cache = RocksdbStorage::new(path); + if sk_cache.l1_batch_number() > last_l1_batch_to_keep + 1 { + let mut storage = self.connection_pool.access_storage().await; vlog::info!("rolling back state keeper cache..."); - sk_cache.rollback(logs, factory_deps, last_l1_batch_to_keep); + sk_cache.rollback(&mut storage, last_l1_batch_to_keep).await; } else { vlog::info!("nothing to revert in state keeper cache"); } } /// Reverts data in the Postgres database. - fn rollback_postgres(&self, last_l1_batch_to_keep: L1BatchNumber) { + async fn rollback_postgres(&self, last_l1_batch_to_keep: L1BatchNumber) { vlog::info!("rolling back postgres data..."); - let mut storage = self.connection_pool.access_storage_blocking(); - let mut transaction = storage.start_transaction_blocking(); + let mut storage = self.connection_pool.access_storage().await; + let mut transaction = storage.start_transaction().await; let (_, last_miniblock_to_keep) = transaction .blocks_dal() .get_miniblock_range_of_l1_batch(last_l1_batch_to_keep) + .await .expect("L1 batch should contain at least one miniblock"); vlog::info!("rolling back transactions state..."); transaction .transactions_dal() - .reset_transactions_state(last_miniblock_to_keep); + .reset_transactions_state(last_miniblock_to_keep) + .await; vlog::info!("rolling back events..."); transaction .events_dal() - .rollback_events(last_miniblock_to_keep); + .rollback_events(last_miniblock_to_keep) + .await; vlog::info!("rolling back l2 to l1 logs..."); transaction .events_dal() - .rollback_l2_to_l1_logs(last_miniblock_to_keep); + .rollback_l2_to_l1_logs(last_miniblock_to_keep) + .await; vlog::info!("rolling back created tokens..."); transaction .tokens_dal() - .rollback_tokens(last_miniblock_to_keep); + .rollback_tokens(last_miniblock_to_keep) + .await; vlog::info!("rolling back factory deps...."); transaction .storage_dal() - .rollback_factory_deps(last_miniblock_to_keep); + .rollback_factory_deps(last_miniblock_to_keep) + .await; vlog::info!("rolling back storage..."); transaction .storage_logs_dal() - .rollback_storage(last_miniblock_to_keep); + .rollback_storage(last_miniblock_to_keep) + .await; vlog::info!("rolling back storage logs..."); transaction .storage_logs_dal() - .rollback_storage_logs(last_miniblock_to_keep); + .rollback_storage_logs(last_miniblock_to_keep) + .await; vlog::info!("rolling back l1 batches..."); transaction .blocks_dal() - .delete_l1_batches(last_l1_batch_to_keep); + .delete_l1_batches(last_l1_batch_to_keep) + .await; vlog::info!("rolling back miniblocks..."); transaction
.blocks_dal() - .delete_miniblocks(last_miniblock_to_keep); + .delete_miniblocks(last_miniblock_to_keep) + .await; - transaction.commit_blocking(); + transaction.commit().await; } /// Sends revert transaction to L1. @@ -312,44 +286,52 @@ impl BlockReverter { priority_fee_per_gas: U256, nonce: u64, ) { - let eth_gateway = PKSigningClient::from_config(&self.config); + let eth_config = self + .eth_config + .as_ref() + .expect("eth_config is not provided"); + + let web3 = Web3::new(Http::new(&eth_config.eth_client_url).unwrap()); let contract = zksync_contract(); - let revert_blocks = contract - .functions - .get("revertBlocks") - .expect("revertBlocks function not found") - .last() - .expect("revertBlocks function entry not found"); - let args = [Token::Uint(U256::from(last_l1_batch_to_keep.0))]; - let raw_tx = revert_blocks - .encode_input(&args) - .expect("Failed to encode transaction data.") - .to_vec(); - let signed_tx = eth_gateway - .sign_prepared_tx_for_addr( - raw_tx, - self.config.contracts.validator_timelock_addr, - Options::with(|opt| { - opt.gas = Some(5_000_000.into()); - opt.max_priority_fee_per_gas = Some(priority_fee_per_gas); - opt.nonce = Some(nonce.into()); - }), - "block-reverter", - ) + let signer = PrivateKeySigner::new(eth_config.reverter_private_key); + let chain_id = web3.eth().chain_id().await.unwrap().as_u64(); + + let data = contract + .function("revertBlocks") + .unwrap() + .encode_input(&[Token::Uint(last_l1_batch_to_keep.0.into())]) + .unwrap(); + + let base_fee = web3 + .eth() + .block(BlockId::Number(BlockNumber::Pending)) .await - .expect("Failed to sign transaction"); - let tx_hash = eth_gateway - .send_raw_tx(signed_tx.raw_tx) + .unwrap() + .unwrap() + .base_fee_per_gas + .unwrap(); + + let tx = TransactionParameters { + to: eth_config.validator_timelock_addr.into(), + data, + chain_id, + nonce: nonce.into(), + max_priority_fee_per_gas: priority_fee_per_gas, + max_fee_per_gas: base_fee + priority_fee_per_gas, + gas: 5_000_000.into(), + ..Default::default() + }; + + let signed_tx = signer.sign_transaction(tx).await.unwrap(); + let hash = web3 + .eth() + .send_raw_transaction(signed_tx.into()) .await - .expect("failed to send revert transaction to L1"); + .unwrap(); loop { - if let Some(status) = eth_gateway - .get_tx_status(tx_hash, "block reverter") - .await - .expect("Failed to get tx status from eth node") - { - assert!(status.success); + if let Some(receipt) = web3.eth().transaction_receipt(hash).await.unwrap() { + assert_eq!(receipt.status, Some(1.into()), "revert transaction failed"); vlog::info!("revert transaction has completed"); return; } else { @@ -365,17 +347,28 @@ impl BlockReverter { AggregatedActionType::PublishProofBlocksOnchain => "getTotalBlocksVerified", AggregatedActionType::ExecuteBlocks => "getTotalBlocksExecuted", }; - let eth_gateway = PKSigningClient::from_config(&self.config); - let block_number: U256 = eth_gateway - .call_main_contract_function(function_name, (), None, Options::default(), None) + let eth_config = self + .eth_config + .as_ref() + .expect("eth_config is not provided"); + + let web3 = Web3::new(Http::new(&eth_config.eth_client_url).unwrap()); + let contract = { + let abi = zksync_contract(); + let contract_address = eth_config.diamond_proxy_addr; + Contract::new(web3.eth(), contract_address, abi) + }; + + let block_number: U256 = contract + .query(function_name, (), None, Options::default(), None) .await .unwrap(); + L1BatchNumber(block_number.as_u32()) } /// Returns suggested values for rollback.
pub async fn suggested_values(&self) -> SuggestedRollbackValues { - let eth_gateway = PKSigningClient::from_config(&self.config); let last_committed_l1_batch_number = self .get_l1_batch_number_from_contract(AggregatedActionType::CommitBlocks) .await; @@ -392,16 +385,20 @@ impl BlockReverter { last_executed_l1_batch_number ); - let nonce = eth_gateway - .pending_nonce("reverter") + let eth_config = self + .eth_config + .as_ref() + .expect("eth_config is not provided"); + + let priority_fee = eth_config.default_priority_fee_per_gas; + + let web3 = Web3::new(Http::new(&eth_config.eth_client_url).unwrap()); + let nonce = web3 + .eth() + .transaction_count(eth_config.reverter_address, Some(BlockNumber::Pending)) .await .unwrap() .as_u64(); - let priority_fee = self - .config - .eth_sender - .gas_adjuster - .default_priority_fee_per_gas; SuggestedRollbackValues { last_executed_l1_batch_number, @@ -414,9 +411,11 @@ impl BlockReverter { pub async fn clear_failed_l1_transactions(&self) { vlog::info!("clearing failed L1 transactions..."); self.connection_pool - .access_storage_blocking() + .access_storage() + .await .eth_sender_dal() - .clear_failed_transactions(); + .clear_failed_transactions() + .await; } } diff --git a/core/bin/zksync_core/src/consistency_checker/mod.rs b/core/bin/zksync_core/src/consistency_checker/mod.rs index 10102da7c02c..345d07caf872 100644 --- a/core/bin/zksync_core/src/consistency_checker/mod.rs +++ b/core/bin/zksync_core/src/consistency_checker/mod.rs @@ -1,19 +1,12 @@ use std::time::Duration; use zksync_dal::ConnectionPool; -use zksync_types::web3::{ - error, ethabi, - transports::Http, - types::{Address, TransactionId}, - Web3, -}; +use zksync_types::web3::{error, ethabi, transports::Http, types::TransactionId, Web3}; use zksync_types::L1BatchNumber; #[derive(Debug)] pub struct ConsistencyChecker { // ABI of the zkSync contract contract: ethabi::Contract, - // Address of the zkSync contract - contract_addr: Address, // How many past batches to check when starting max_batches_to_recheck: u32, web3: Web3, @@ -23,29 +16,24 @@ pub struct ConsistencyChecker { const SLEEP_DELAY: Duration = Duration::from_secs(5); impl ConsistencyChecker { - pub fn new( - web3_url: &str, - contract_addr: Address, - max_batches_to_recheck: u32, - db: ConnectionPool, - ) -> Self { + pub fn new(web3_url: &str, max_batches_to_recheck: u32, db: ConnectionPool) -> Self { let web3 = Web3::new(Http::new(web3_url).unwrap()); let contract = zksync_contracts::zksync_contract(); Self { web3, contract, - contract_addr, max_batches_to_recheck, db, } } async fn check_commitments(&self, batch_number: L1BatchNumber) -> Result<bool, error::Error> { - let mut storage = self.db.access_storage_blocking(); + let mut storage = self.db.access_storage().await; let storage_block = storage .blocks_dal() .get_storage_block(batch_number) + .await .unwrap_or_else(|| panic!("Block {} not found in the database", batch_number)); let commit_tx_id = storage_block @@ -56,6 +44,7 @@ impl ConsistencyChecker { let block_metadata = storage .blocks_dal() .get_block_with_metadata(storage_block) + .await .unwrap_or_else(|| { panic!( "Block metadata for block {} not found in the database", @@ -66,6 +55,7 @@ impl ConsistencyChecker { let commit_tx_hash = storage .eth_sender_dal() .get_confirmed_tx_hash_by_eth_tx_id(commit_tx_id) + .await .unwrap_or_else(|| { panic!( "Commit tx hash not found in the database.
Commit tx id: {}", @@ -100,11 +90,6 @@ impl ConsistencyChecker { Some(1.into()), "Main node gave us a failed commit tx" ); - assert_eq!( - commit_tx.to, - Some(self.contract_addr), - "Main node gave us a commit tx sent to a wrong address" - ); let commitments = self .contract @@ -128,17 +113,20 @@ impl ConsistencyChecker { Ok(commitment == &block_metadata.l1_commit_data()) } - fn last_committed_batch(&self) -> L1BatchNumber { + async fn last_committed_batch(&self) -> L1BatchNumber { self.db - .access_storage_blocking() + .access_storage() + .await .blocks_dal() .get_number_of_last_block_committed_on_eth() + .await .unwrap_or(L1BatchNumber(0)) } pub async fn run(self, stop_receiver: tokio::sync::watch::Receiver<bool>) { let mut batch_number: L1BatchNumber = self .last_committed_batch() + .await .0 .saturating_sub(self.max_batches_to_recheck) .max(1) .into(); @@ -154,15 +142,17 @@ impl ConsistencyChecker { let batch_has_metadata = self .db - .access_storage_blocking() + .access_storage() + .await .blocks_dal() .get_block_metadata(batch_number) + .await .is_some(); // The batch might be already committed but not yet processed by the external node's tree // OR the batch might be processed by the external node's tree but not yet committed. // We need both. - if !batch_has_metadata || self.last_committed_batch() < batch_number { + if !batch_has_metadata || self.last_committed_batch().await < batch_number { tokio::time::sleep(SLEEP_DELAY).await; continue; } diff --git a/core/bin/zksync_core/src/data_fetchers/mod.rs b/core/bin/zksync_core/src/data_fetchers/mod.rs index ab91728ca818..6ab0453eb09d 100644 --- a/core/bin/zksync_core/src/data_fetchers/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/mod.rs @@ -11,7 +11,7 @@ use tokio::sync::watch; use tokio::task::JoinHandle; -use zksync_config::ZkSyncConfig; +use zksync_config::FetcherConfig; use zksync_dal::ConnectionPool; pub mod error; @@ -20,11 +20,12 @@ pub mod token_price; pub mod token_trading_volume; pub fn run_data_fetchers( - config: &ZkSyncConfig, + config: &FetcherConfig, + network: zksync_types::network::Network, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>, ) -> Vec<JoinHandle<()>> { - let list_fetcher = token_list::TokenListFetcher::new(config.clone()); + let list_fetcher = token_list::TokenListFetcher::new(config.clone(), network); let price_fetcher = token_price::TokenPriceFetcher::new(config.clone()); let volume_fetcher = token_trading_volume::TradingVolumeFetcher::new(config.clone()); diff --git a/core/bin/zksync_core/src/data_fetchers/token_list/mock.rs b/core/bin/zksync_core/src/data_fetchers/token_list/mock.rs index 1c202ecb74b3..ba6e77ef059b 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_list/mock.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_list/mock.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, fs::read_to_string, path::PathBuf, str::FromStr} use async_trait::async_trait; use serde::{Deserialize, Serialize}; -use zksync_config::ZkSyncConfig; +use zksync_types::network::Network; use zksync_types::{ tokens::{TokenMetadata, ETHEREUM_ADDRESS}, Address, @@ -20,8 +20,8 @@ pub struct MockTokenListFetcher { } impl MockTokenListFetcher { - pub fn new(config: &ZkSyncConfig) -> Self { - let network = config.chain.eth.network.to_string(); + pub fn new(network: Network) -> Self { + let network = network.to_string(); let tokens: HashMap<_, _> = get_genesis_token_list(&network) .into_iter() .map(|item| { diff --git a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs
index f8b727e57161..e96d01b97931 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_list/mod.rs @@ -16,8 +16,9 @@ use std::{ use async_trait::async_trait; use tokio::sync::watch; -use zksync_config::{configs::fetcher::TokenListSource, ZkSyncConfig}; +use zksync_config::{configs::fetcher::TokenListSource, FetcherConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_types::network::Network; use zksync_types::{tokens::TokenMetadata, Address}; use super::error::{ApiFetchError, ErrorAnalyzer}; @@ -33,27 +34,26 @@ pub trait FetcherImpl: std::fmt::Debug + Send + Sync { #[derive(Debug)] pub struct TokenListFetcher { - config: ZkSyncConfig, + config: FetcherConfig, fetcher: Box<dyn FetcherImpl>, error_handler: ErrorAnalyzer, } impl TokenListFetcher { - fn create_fetcher(config: &ZkSyncConfig) -> Box<dyn FetcherImpl> { - let token_list_config = &config.fetcher.token_list; + fn create_fetcher(config: &FetcherConfig, network: Network) -> Box<dyn FetcherImpl> { + let token_list_config = &config.token_list; match token_list_config.source { TokenListSource::OneInch => { - Box::new(one_inch::OneInchTokenListFetcher::new(&config.fetcher)) - as Box<dyn FetcherImpl> + Box::new(one_inch::OneInchTokenListFetcher::new(config)) as Box<dyn FetcherImpl> } TokenListSource::Mock => { - Box::new(mock::MockTokenListFetcher::new(config)) as Box<dyn FetcherImpl> + Box::new(mock::MockTokenListFetcher::new(network)) as Box<dyn FetcherImpl> } } } - pub fn new(config: ZkSyncConfig) -> Self { - let fetcher = Self::create_fetcher(&config); + pub fn new(config: FetcherConfig, network: Network) -> Self { + let fetcher = Self::create_fetcher(&config, network); let error_handler = ErrorAnalyzer::new("TokenListFetcher"); Self { config, @@ -64,7 +64,7 @@ impl TokenListFetcher { pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) { let mut fetching_interval = - tokio::time::interval(self.config.fetcher.token_list.fetching_interval()); + tokio::time::interval(self.config.token_list.fetching_interval()); loop { if *stop_receiver.borrow() { @@ -87,11 +87,11 @@ impl TokenListFetcher { }; // We assume that token metadata does not change, thus we only looking for the new tokens. - let mut storage = pool.access_storage_blocking(); - let unknown_tokens = self.load_unknown_tokens(&mut storage); + let mut storage = pool.access_storage().await; + let unknown_tokens = self.load_unknown_tokens(&mut storage).await; token_list.retain(|token, _data| unknown_tokens.contains(token)); - self.update_tokens(&mut storage, token_list); + self.update_tokens(&mut storage, token_list).await; } } @@ -105,21 +105,24 @@ impl TokenListFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - fn update_tokens( + async fn update_tokens( &self, storage: &mut StorageProcessor<'_>, tokens: HashMap<Address, TokenMetadata>, ) { let mut tokens_dal = storage.tokens_dal(); for (token, metadata) in tokens { - tokens_dal.update_well_known_l1_token(&token, metadata); + tokens_dal + .update_well_known_l1_token(&token, metadata) + .await; } } - fn load_unknown_tokens(&self, storage: &mut StorageProcessor<'_>) -> HashSet<Address>
{ + async fn load_unknown_tokens(&self, storage: &mut StorageProcessor<'_>) -> HashSet<Address>
{ storage .tokens_dal() .get_unknown_l1_token_addresses() + .await .into_iter() .collect() } diff --git a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs index a368f62cb8c0..67ff9236b113 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_price/mod.rs @@ -4,7 +4,7 @@ use std::{collections::HashMap, time::Duration}; use async_trait::async_trait; -use zksync_config::{configs::fetcher::TokenPriceSource, ZkSyncConfig}; +use zksync_config::{configs::fetcher::TokenPriceSource, FetcherConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{tokens::TokenPrice, Address}; @@ -29,17 +29,17 @@ pub trait FetcherImpl: std::fmt::Debug + Send + Sync { #[derive(Debug)] pub struct TokenPriceFetcher { minimum_required_liquidity: Ratio<BigUint>, - config: ZkSyncConfig, + config: FetcherConfig, fetcher: Box<dyn FetcherImpl>, error_handler: ErrorAnalyzer, } impl TokenPriceFetcher { - fn create_fetcher(config: &ZkSyncConfig) -> Box<dyn FetcherImpl> { - let token_price_config = &config.fetcher.token_price; + fn create_fetcher(config: &FetcherConfig) -> Box<dyn FetcherImpl> { + let token_price_config = &config.token_price; match token_price_config.source { TokenPriceSource::CoinGecko => { - Box::new(coingecko::CoinGeckoFetcher::new(&config.fetcher)) as Box<dyn FetcherImpl> + Box::new(coingecko::CoinGeckoFetcher::new(config)) as Box<dyn FetcherImpl> } TokenPriceSource::CoinMarketCap => { unimplemented!() @@ -50,7 +50,7 @@ impl TokenPriceFetcher { } } - pub fn new(config: ZkSyncConfig) -> Self { + pub fn new(config: FetcherConfig) -> Self { let fetcher = Self::create_fetcher(&config); let error_handler = ErrorAnalyzer::new("TokenPriceFetcher"); Self { @@ -65,7 +65,7 @@ impl TokenPriceFetcher { pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) { let mut fetching_interval = - tokio::time::interval(self.config.fetcher.token_price.fetching_interval()); + tokio::time::interval(self.config.token_price.fetching_interval()); loop { if *stop_receiver.borrow() { @@ -77,8 +77,8 @@ impl TokenPriceFetcher { self.error_handler.update().await; // We refresh token list in case new tokens were added. - let mut storage = pool.access_storage_blocking(); - let tokens = self.get_tokens(&mut storage); + let mut storage = pool.access_storage().await; + let tokens = self.get_tokens(&mut storage).await; // Vector of received token prices in the format of (`token_addr`, `price_in_usd`, `fetch_timestamp`). let token_prices = match self.fetch_token_price(&tokens).await { @@ -91,7 +91,7 @@ impl TokenPriceFetcher { continue; } }; - self.store_token_prices(&mut storage, token_prices); + self.store_token_prices(&mut storage, token_prices).await; } } @@ -108,22 +108,23 @@ impl TokenPriceFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - fn store_token_prices( + async fn store_token_prices( &self, storage: &mut StorageProcessor<'_>, token_prices: HashMap<Address, TokenPrice>, ) { let mut tokens_dal = storage.tokens_dal(); for (token, price) in token_prices { - tokens_dal.set_l1_token_price(&token, price); + tokens_dal.set_l1_token_price(&token, price).await; } } /// Returns the list of "interesting" tokens, e.g. ones that can be used to pay fees. /// We don't actually need prices for other tokens. - fn get_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ + async fn get_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ storage .tokens_dal() .get_l1_tokens_by_volume(&self.minimum_required_liquidity) + .await } } diff --git a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs index 165a14dd70fb..396128afbc6c 100644 --- a/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs +++ b/core/bin/zksync_core/src/data_fetchers/token_trading_volume/mod.rs @@ -7,7 +7,7 @@ use std::{collections::HashMap, time::Duration}; use async_trait::async_trait; use tokio::sync::watch; -use zksync_config::{configs::fetcher::TokenTradingVolumeSource, ZkSyncConfig}; +use zksync_config::{configs::fetcher::TokenTradingVolumeSource, FetcherConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{tokens::TokenMarketVolume, Address}; @@ -27,18 +27,17 @@ pub trait FetcherImpl: std::fmt::Debug + Send + Sync { #[derive(Debug)] pub struct TradingVolumeFetcher { - config: ZkSyncConfig, + config: FetcherConfig, fetcher: Box<dyn FetcherImpl>, error_handler: ErrorAnalyzer, } impl TradingVolumeFetcher { - fn create_fetcher(config: &ZkSyncConfig) -> Box<dyn FetcherImpl> { - let token_trading_volume_config = &config.fetcher.token_trading_volume; + fn create_fetcher(config: &FetcherConfig) -> Box<dyn FetcherImpl> { + let token_trading_volume_config = &config.token_trading_volume; match token_trading_volume_config.source { TokenTradingVolumeSource::Uniswap => { - Box::new(uniswap::UniswapTradingVolumeFetcher::new(&config.fetcher)) - as Box<dyn FetcherImpl> + Box::new(uniswap::UniswapTradingVolumeFetcher::new(config)) as Box<dyn FetcherImpl> } TokenTradingVolumeSource::Mock => { Box::new(mock::MockTradingVolumeFetcher::new()) as Box<dyn FetcherImpl> @@ -46,7 +45,7 @@ impl TradingVolumeFetcher { } } - pub fn new(config: ZkSyncConfig) -> Self { + pub fn new(config: FetcherConfig) -> Self { let fetcher = Self::create_fetcher(&config); let error_handler = ErrorAnalyzer::new("TradingVolumeFetcher"); Self { @@ -58,7 +57,7 @@ impl TradingVolumeFetcher { pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver<bool>) { let mut fetching_interval = - tokio::time::interval(self.config.fetcher.token_trading_volume.fetching_interval()); + tokio::time::interval(self.config.token_trading_volume.fetching_interval()); loop { if *stop_receiver.borrow() { vlog::info!("Stop signal received, trading_volume_fetcher is shutting down"); @@ -68,8 +67,8 @@ impl TradingVolumeFetcher { fetching_interval.tick().await; self.error_handler.update().await; - let mut storage = pool.access_storage_blocking(); - let known_l1_tokens = self.load_tokens(&mut storage); + let mut storage = pool.access_storage().await; + let known_l1_tokens = self.load_tokens(&mut storage).await; let trading_volumes = match self.fetch_trading_volumes(&known_l1_tokens).await { Ok(volumes) => { @@ -82,7 +81,8 @@ impl TradingVolumeFetcher { } }; - self.store_market_volumes(&mut storage, trading_volumes); + self.store_market_volumes(&mut storage, trading_volumes) + .await; } } @@ -99,23 +99,24 @@ impl TradingVolumeFetcher { .map_err(|_| ApiFetchError::RequestTimeout)? } - fn store_market_volumes( + async fn store_market_volumes( &self, storage: &mut StorageProcessor<'_>, tokens: HashMap<Address, TokenMarketVolume>, ) { let mut tokens_dal = storage.tokens_dal(); for (token, volume) in tokens { - tokens_dal.set_l1_token_market_volume(&token, volume); + tokens_dal.set_l1_token_market_volume(&token, volume).await; } } /// Returns the list of tokens with known metadata (if token is not in the list we use, /// it's very likely to not have required level of trading volume anyways).
- fn load_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ + async fn load_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec<Address>
{ storage .tokens_dal() .get_well_known_token_addresses() + .await .into_iter() .map(|(l1_token, _)| l1_token) .collect() } diff --git a/core/bin/zksync_core/src/eth_sender/aggregator.rs b/core/bin/zksync_core/src/eth_sender/aggregator.rs index 7cdd357eb3f0..0c643c0763c4 100644 --- a/core/bin/zksync_core/src/eth_sender/aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/aggregator.rs @@ -1,16 +1,20 @@ -use crate::eth_sender::block_publish_criterion::{ - BlockNumberCriterion, BlockPublishCriterion, DataSizeCriterion, GasCriterion, - TimestampDeadlineCriterion, -}; use zksync_config::configs::eth_sender::{ProofSendingMode, SenderConfig}; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; -use zksync_types::aggregated_operations::{ - AggregatedActionType, AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, - BlocksProofOperation, +use zksync_types::{ + aggregated_operations::{ + AggregatedActionType, AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, + BlocksProofOperation, + }, + commitment::BlockWithMetadata, + helpers::unix_timestamp_ms, + L1BatchNumber, +}; + +use crate::eth_sender::block_publish_criterion::{ + BlockNumberCriterion, BlockPublishCriterion, DataSizeCriterion, GasCriterion, + TimestampDeadlineCriterion, }; -use zksync_types::commitment::BlockWithMetadata; -use zksync_types::L1BatchNumber; #[derive(Debug)] pub struct Aggregator { @@ -82,9 +86,10 @@ impl Aggregator { pub async fn get_next_ready_operation( &mut self, storage: &mut StorageProcessor<'_>, + prover_storage: &mut StorageProcessor<'_>, base_system_contracts_hashes: BaseSystemContractsHashes, ) -> Option<AggregatedOperation> { - let last_sealed_block_number = storage.blocks_dal().get_sealed_block_number(); + let last_sealed_block_number = storage.blocks_dal().get_sealed_block_number().await; if let Some(op) = self .get_execute_operations( storage, @@ -97,6 +102,7 @@ impl Aggregator { } else if let Some(op) = self .get_proof_operation( storage, + prover_storage, *self.config.aggregated_proof_sizes.iter().max().unwrap(), last_sealed_block_number, ) @@ -121,10 +127,14 @@ impl Aggregator { limit: usize, last_sealed_block: L1BatchNumber, ) -> Option<AggregatedOperation> { - let ready_for_execute_blocks = storage.blocks_dal().get_ready_for_execute_blocks( - limit, - self.config.l1_batch_min_age_before_execute_seconds, - ); + let max_l1_batch_timestamp_millis = self + .config + .l1_batch_min_age_before_execute_seconds + .map(|age| unix_timestamp_ms() - age * 1_000); + let ready_for_execute_blocks = storage + .blocks_dal() + .get_ready_for_execute_blocks(limit, max_l1_batch_timestamp_millis) + .await; let blocks = extract_ready_subrange( storage, &mut self.execute_criterion, @@ -145,13 +155,15 @@ impl Aggregator { ) -> Option<AggregatedOperation> { let mut blocks_dal = storage.blocks_dal(); - let last_block = blocks_dal.get_last_committed_to_eth_block()?; + let last_block = blocks_dal.get_last_committed_to_eth_block().await?; - let ready_for_commit_blocks = blocks_dal.get_ready_for_commit_blocks( - limit, - base_system_contracts_hashes.bootloader, - base_system_contracts_hashes.default_aa, - ); + let ready_for_commit_blocks = blocks_dal + .get_ready_for_commit_blocks( + limit, + base_system_contracts_hashes.bootloader, + base_system_contracts_hashes.default_aa, + ) + .await; // Check that the blocks that are selected are sequential ready_for_commit_blocks @@ -177,32 +189,53 @@ impl Aggregator { }) } - fn load_real_proof_operation( + async fn load_real_proof_operation( storage: &mut StorageProcessor<'_>, + prover_storage:
&mut StorageProcessor<'_>, ) -> Option<BlocksProofOperation> { - let blocks = storage - .blocks_dal() - .get_ready_for_proof_blocks_real_verifier(1usize); - if !blocks.is_empty() { - let prev_block_number = blocks.first().map(|bl| bl.header.number - 1)?; - let prev_block = storage.blocks_dal().get_block_metadata(prev_block_number)?; - let from = blocks.first().map(|bl| bl.header.number)?; - let to = blocks.last().map(|bl| bl.header.number)?; - let proofs = storage.prover_dal().get_final_proofs_for_blocks(from, to); + let previous_proven_block_number = + storage.blocks_dal().get_last_l1_batch_with_prove_tx().await; + let proofs = prover_storage + .prover_dal() + .get_final_proofs_for_blocks( + previous_proven_block_number + 1, + previous_proven_block_number + 1, + ) + .await; + if proofs.is_empty() { + // The proof for the next block is not generated yet + return None; + } - // currently we only support sending one proof - assert_eq!(proofs.len(), 1); - assert_eq!(from, to); + assert_eq!(proofs.len(), 1); - Some(BlocksProofOperation { - prev_block, - blocks, - proofs, - should_verify: true, - }) - } else { - None - } + let previous_proven_block_metadata = storage + .blocks_dal() + .get_block_metadata(previous_proven_block_number) + .await + .unwrap_or_else(|| { + panic!( + "Block number {} with submitted proof is not complete in the DB", + previous_proven_block_number + ) + }); + let block_to_prove_metadata = storage + .blocks_dal() + .get_block_metadata(previous_proven_block_number + 1) + .await + .unwrap_or_else(|| { + panic!( + "Block number {} with generated proof is not complete in the DB", + previous_proven_block_number + 1 + ) + }); + + Some(BlocksProofOperation { + prev_block: previous_proven_block_metadata, + blocks: vec![block_to_prove_metadata], + proofs, + should_verify: true, + }) } async fn prepare_dummy_proof_operation( @@ -220,7 +253,10 @@ impl Aggregator { .await { let prev_block_number = blocks.first().map(|bl| bl.header.number - 1)?; - let prev_block = storage.blocks_dal().get_block_metadata(prev_block_number)?; + let prev_block = storage + .blocks_dal() + .get_block_metadata(prev_block_number) + .await?; Some(BlocksProofOperation { prev_block, @@ -236,14 +272,19 @@ impl Aggregator { async fn get_proof_operation( &mut self, storage: &mut StorageProcessor<'_>, + prover_storage: &mut StorageProcessor<'_>, limit: usize, last_sealed_block: L1BatchNumber, ) -> Option<AggregatedOperation> { match self.config.proof_sending_mode { - ProofSendingMode::OnlyRealProofs => Self::load_real_proof_operation(storage), + ProofSendingMode::OnlyRealProofs => { + Self::load_real_proof_operation(storage, prover_storage).await + } ProofSendingMode::SkipEveryProof => { - let ready_for_proof_blocks = - storage.blocks_dal().get_ready_for_dummy_proof_blocks(limit); + let ready_for_proof_blocks = storage + .blocks_dal() + .get_ready_for_dummy_proof_blocks(limit) + .await; self.prepare_dummy_proof_operation( storage, ready_for_proof_blocks, @@ -253,11 +294,13 @@ impl Aggregator { } ProofSendingMode::OnlySampledProofs => { // if there is a sampled proof then send it, otherwise check for skipped ones.
- if let Some(op) = Self::load_real_proof_operation(storage) { + if let Some(op) = Self::load_real_proof_operation(storage, prover_storage).await { Some(op) } else { - let ready_for_proof_blocks = - storage.blocks_dal().get_skipped_for_proof_blocks(limit); + let ready_for_proof_blocks = storage + .blocks_dal() + .get_skipped_for_proof_blocks(limit) + .await; self.prepare_dummy_proof_operation( storage, ready_for_proof_blocks, diff --git a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs b/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs index 75bf1360664e..87c581df69f1 100644 --- a/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs +++ b/core/bin/zksync_core/src/eth_sender/block_publish_criterion.rs @@ -135,7 +135,7 @@ impl GasCriterion { GasCriterion { op, gas_limit } } - fn get_gas_amount( + async fn get_gas_amount( &mut self, storage: &mut StorageProcessor<'_>, block_number: L1BatchNumber, @@ -143,6 +143,7 @@ impl GasCriterion { storage .blocks_dal() .get_blocks_predicted_gas(block_number, block_number, self.op) + .await } } @@ -164,7 +165,7 @@ impl BlockPublishCriterion for GasCriterion { let mut last_block: Option = None; for (index, block) in consecutive_blocks.iter().enumerate() { - let block_gas = self.get_gas_amount(storage, block.header.number); + let block_gas = self.get_gas_amount(storage, block.header.number).await; if block_gas >= gas_left { if index == 0 { panic!( diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 980032d1c0a5..ac1da0d38ade 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -7,7 +7,7 @@ use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; +use zksync_eth_client::BoundEthInterface; use zksync_types::{aggregated_operations::AggregatedOperation, eth_sender::EthTx, Address, H256}; /// The component is responsible for aggregating l1 batches into eth_txs: @@ -39,18 +39,16 @@ impl EthTxAggregator { } } - pub async fn run( + pub async fn run<E: BoundEthInterface>( mut self, pool: ConnectionPool, - eth_client: PKSigningClient, + prover_pool: ConnectionPool, + eth_client: E, stop_receiver: watch::Receiver<bool>, ) { loop { - let base_system_contracts_hashes = self - .get_l1_base_system_contracts_hashes(&eth_client) - .await - .unwrap(); - let mut storage = pool.access_storage_blocking(); + let mut storage = pool.access_storage_tagged("eth_sender").await; + let mut prover_storage = prover_pool.access_storage_tagged("eth_sender").await; if *stop_receiver.borrow() { vlog::info!("Stop signal received, eth_tx_aggregator is shutting down"); break; } if let Err(e) = self - .loop_iteration(&mut storage, base_system_contracts_hashes) + .loop_iteration(&mut storage, &mut prover_storage, &eth_client) .await { // Web3 API request failures can cause this, @@ -70,9 +68,9 @@ impl EthTxAggregator { } } - async fn get_l1_base_system_contracts_hashes( + async fn get_l1_base_system_contracts_hashes<E: BoundEthInterface>( &mut self, - eth_client: &PKSigningClient, + eth_client: &E, ) -> Result<BaseSystemContractsHashes, ETHSenderError> { let bootloader_code_hash: H256 = eth_client .call_main_contract_function( @@ -99,24 +97,27 @@ impl EthTxAggregator { }) } - #[tracing::instrument(skip(self, storage, base_system_contracts_hashes))] - async fn
loop_iteration( + #[tracing::instrument(skip(self, storage, eth_client))] + async fn loop_iteration<E: BoundEthInterface>( &mut self, storage: &mut StorageProcessor<'_>, - base_system_contracts_hashes: BaseSystemContractsHashes, + prover_storage: &mut StorageProcessor<'_>, + eth_client: &E, ) -> Result<(), ETHSenderError> { + let base_system_contracts_hashes = + self.get_l1_base_system_contracts_hashes(eth_client).await?; if let Some(agg_op) = self .aggregator - .get_next_ready_operation(storage, base_system_contracts_hashes) + .get_next_ready_operation(storage, prover_storage, base_system_contracts_hashes) .await { - let tx = self.save_eth_tx(storage, &agg_op)?; - Self::log_eth_tx_saving(storage, agg_op, &tx); + let tx = self.save_eth_tx(storage, &agg_op).await?; + Self::log_eth_tx_saving(storage, agg_op, &tx).await; } Ok(()) } - fn log_eth_tx_saving( + async fn log_eth_tx_saving( storage: &mut StorageProcessor<'_>, aggregated_op: AggregatedOperation, tx: &EthTx, @@ -154,7 +155,7 @@ impl EthTxAggregator { (aggregated_op.get_block_range().1.0 - aggregated_op.get_block_range().0.0 + 1) as f64, "type" => aggregated_op.get_action_type().to_string() ); - track_eth_tx_metrics(storage, "save", tx); + track_eth_tx_metrics(storage, "save", tx).await; } fn encode_aggregated_op(&self, op: &AggregatedOperation) -> Vec<u8> { @@ -176,40 +177,47 @@ impl EthTxAggregator { .to_vec() } - pub(super) fn save_eth_tx( + pub(super) async fn save_eth_tx( &self, storage: &mut StorageProcessor<'_>, aggregated_op: &AggregatedOperation, ) -> Result<EthTx, ETHSenderError> { - let mut transaction = storage.start_transaction_blocking(); - let nonce = self.get_next_nonce(&mut transaction)?; + let mut transaction = storage.start_transaction().await; + let nonce = self.get_next_nonce(&mut transaction).await?; let calldata = self.encode_aggregated_op(aggregated_op); let (first_block, last_block) = aggregated_op.get_block_range(); let op_type = aggregated_op.get_action_type(); - let blocks_predicted_gas = - transaction - .blocks_dal() - .get_blocks_predicted_gas(first_block, last_block, op_type); + let blocks_predicted_gas = transaction + .blocks_dal() + .get_blocks_predicted_gas(first_block, last_block, op_type) + .await; let eth_tx_predicted_gas = agg_block_base_cost(op_type) + blocks_predicted_gas; - let eth_tx = transaction.eth_sender_dal().save_eth_tx( - nonce, - calldata, - op_type, - self.contract_address, - eth_tx_predicted_gas, - ); + let eth_tx = transaction + .eth_sender_dal() + .save_eth_tx( + nonce, + calldata, + op_type, + self.contract_address, + eth_tx_predicted_gas, + ) + .await; transaction .blocks_dal() - .set_eth_tx_id(first_block, last_block, eth_tx.id, op_type); - transaction.commit_blocking(); + .set_eth_tx_id(first_block, last_block, eth_tx.id, op_type) + .await; + transaction.commit().await; Ok(eth_tx) } - fn get_next_nonce(&self, storage: &mut StorageProcessor<'_>) -> Result<u64, ETHSenderError> { - let db_nonce = storage.eth_sender_dal().get_next_nonce().unwrap_or(0); + async fn get_next_nonce( + &self, + storage: &mut StorageProcessor<'_>, + ) -> Result<u64, ETHSenderError> { + let db_nonce = storage.eth_sender_dal().get_next_nonce().await.unwrap_or(0); // Between server starts we can execute some txs using operator account or remove some txs from the database // At the start we have to consider this fact and get the max nonce.
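// Editor's aside (not part of the original patch): a worked example of the
// `max` below. If the database suggests the next nonce is 5 but the operator
// account was also used outside the server, so its on-chain `base_nonce` is
// already 7, then max(5, 7) = 7 avoids reusing an already-consumed nonce.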
Ok(max(db_nonce, self.base_nonce)) diff --git a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs index 285c67363070..de3703e8b37f 100644 --- a/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/bin/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -10,7 +10,7 @@ use zksync_eth_client::{ use zksync_types::{ eth_sender::EthTx, web3::{contract::Options, error::Error as Web3Error}, - L1BlockNumber, H256, U256, + L1BlockNumber, Nonce, H256, U256, }; use zksync_utils::time::seconds_since_epoch; @@ -23,12 +23,18 @@ struct EthFee { priority_fee_per_gas: u64, } -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] struct OperatorNonce { - // Nonce on block `current_block - self.wait_confirmations` - lagging: u64, - // Nonce on block `current_block` - current: u64, + // Nonce on finalized block + finalized: Nonce, + // Nonce on latest block + latest: Nonce, +} + +#[derive(Debug, Clone, Copy)] +pub(super) struct L1BlockNumbers { + pub finalized: L1BlockNumber, + pub latest: L1BlockNumber, } /// The component is responsible for managing sending eth_txs attempts: @@ -56,48 +62,44 @@ where } } - async fn get_tx_status_and_confirmations_count( + async fn get_tx_status( &self, tx_hash: H256, - current_block: L1BlockNumber, - ) -> Result<Option<(ExecutedTxStatus, u64)>, ETHSenderError> { - let status = self - .ethereum_gateway + ) -> Result<Option<ExecutedTxStatus>, ETHSenderError> { + self.ethereum_gateway .get_tx_status(tx_hash, "eth_tx_manager") - .await?; - if let Some(status) = status { - // Amount of confirmations for a block containing the transaction. - let confirmations = (current_block.0 as u64) - .saturating_sub(status.receipt.block_number.unwrap().as_u64()); - return Ok(Some((status, confirmations))); - } - Ok(None) + .await + .map_err(Into::into) } async fn check_all_sending_attempts( &self, storage: &mut StorageProcessor<'_>, op: &EthTx, - current_block: L1BlockNumber, - ) -> Option<(ExecutedTxStatus, u64)> { + ) -> Option<ExecutedTxStatus> { // Checking history items, starting from most recently sent. - for history_item in storage.eth_sender_dal().get_tx_history_to_check(op.id) { + for history_item in storage + .eth_sender_dal() + .get_tx_history_to_check(op.id) + .await + { // `status` is a Result here and we don't unwrap it with `?` // because if we do and get an `Err`, we won't finish the for loop, // which means we might miss the transaction that actually succeeded.
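// Editor's aside (not part of the original patch): concretely, if the resend
// attempts checked newest-first are [h3, h2, h1] and the receipt query for h3
// fails transiently while h1 actually mined, propagating the error with `?`
// would abort the scan before reaching h1; logging the error and continuing
// lets the loop find the real receipt.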
- match self - .get_tx_status_and_confirmations_count(history_item.tx_hash, current_block) - .await - { + match self.get_tx_status(history_item.tx_hash).await { Ok(Some(s)) => return Some(s), Ok(_) => continue, - Err(err) => vlog::warn!("Can't check transaction {:?}", err), + Err(err) => vlog::warn!( + "Can't check transaction {:?}: {:?}", + history_item.tx_hash, + err + ), } } None } - fn calculate_fee( + async fn calculate_fee( &self, storage: &mut StorageProcessor<'_>, tx: &EthTx, @@ -107,8 +109,9 @@ where let priority_fee_per_gas = if time_in_mempool != 0 { metrics::increment_counter!("server.eth_sender.transaction_resent"); - let priority_fee_per_gas = - self.increase_priority_fee(storage, tx.id, base_fee_per_gas)?; + let priority_fee_per_gas = self + .increase_priority_fee(storage, tx.id, base_fee_per_gas) + .await?; vlog::info!( "Resending operation {} with base fee {:?} and priority fee {:?}", tx.id, @@ -135,7 +138,7 @@ where }) } - fn increase_priority_fee( + async fn increase_priority_fee( &self, storage: &mut StorageProcessor<'_>, eth_tx_id: u32, @@ -144,6 +147,7 @@ where let previous_sent_tx = storage .eth_sender_dal() .get_last_sent_eth_tx(eth_tx_id) + .await .unwrap(); let previous_base_fee = previous_sent_tx.base_fee_per_gas; @@ -179,7 +183,7 @@ where let EthFee { base_fee_per_gas, priority_fee_per_gas, - } = self.calculate_fee(storage, tx, time_in_mempool)?; + } = self.calculate_fee(storage, tx, time_in_mempool).await?; metrics::histogram!( "server.eth_sender.used_base_fee_per_gas", @@ -195,13 +199,17 @@ where .sign_tx(tx, base_fee_per_gas, priority_fee_per_gas) .await; - if let Some(tx_history_id) = storage.eth_sender_dal().insert_tx_history( - tx.id, - base_fee_per_gas, - priority_fee_per_gas, - signed_tx.hash, - signed_tx.raw_tx.clone(), - ) { + if let Some(tx_history_id) = storage + .eth_sender_dal() + .insert_tx_history( + tx.id, + base_fee_per_gas, + priority_fee_per_gas, + signed_tx.hash, + signed_tx.raw_tx.clone(), + ) + .await + { if let Err(error) = self .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx, current_block) .await @@ -229,11 +237,15 @@ where Ok(tx_hash) => { storage .eth_sender_dal() - .set_sent_at_block(tx_history_id, current_block.0); + .set_sent_at_block(tx_history_id, current_block.0) + .await; Ok(tx_hash) } Err(error) => { - storage.eth_sender_dal().remove_tx_history(tx_history_id); + storage + .eth_sender_dal() + .remove_tx_history(tx_history_id) + .await; Err(error.into()) } } @@ -241,25 +253,50 @@ where async fn get_operator_nonce( &self, - current_block: L1BlockNumber, + block_numbers: L1BlockNumbers, ) -> Result<OperatorNonce, ETHSenderError> { - let lagging = self + let finalized = self .ethereum_gateway - .nonce_at( - current_block - .saturating_sub(self.config.wait_confirmations as u32) - .into(), - "eth_tx_manager", - ) + .nonce_at(block_numbers.finalized.0.into(), "eth_tx_manager") .await? - .as_u64(); + .as_u32() + .into(); - let current = self + let latest = self .ethereum_gateway - .current_nonce("eth_tx_manager") + .nonce_at(block_numbers.latest.0.into(), "eth_tx_manager") .await? - .as_u64(); - Ok(OperatorNonce { lagging, current }) + .as_u32() + .into(); + Ok(OperatorNonce { finalized, latest }) + } + + async fn get_l1_block_numbers(&self) -> Result<L1BlockNumbers, ETHSenderError> { + let finalized = if let Some(confirmations) = self.config.wait_confirmations { + let latest_block_number = self + .ethereum_gateway + .block_number("eth_tx_manager") + .await?
+ .as_u64(); + (latest_block_number.saturating_sub(confirmations) as u32).into() + } else { + self.ethereum_gateway + .block("finalized".to_string(), "eth_tx_manager") + .await? + .expect("Finalized block must be present on L1") + .number + .expect("Finalized block must contain number") + .as_u32() + .into() + }; + + let latest = self + .ethereum_gateway + .block_number("eth_tx_manager") + .await? + .as_u32() + .into(); + Ok(L1BlockNumbers { finalized, latest }) } // Monitors the inflight transactions, marks mined ones as confirmed, @@ -267,75 +304,75 @@ where pub(super) async fn monitor_inflight_transactions( &mut self, storage: &mut StorageProcessor<'_>, - current_block: L1BlockNumber, + l1_block_numbers: L1BlockNumbers, ) -> Result<Option<(EthTx, u32)>, ETHSenderError> { metrics::gauge!( "server.eth_sender.last_known_l1_block", - current_block.0 as f64 + l1_block_numbers.latest.0 as f64 ); - let operator_nonce = self.get_operator_nonce(current_block).await?; + let operator_nonce = self.get_operator_nonce(l1_block_numbers).await?; - let inflight_txs = storage.eth_sender_dal().get_inflight_txs(); + let inflight_txs = storage.eth_sender_dal().get_inflight_txs().await; metrics::gauge!( "server.eth_sender.number_of_inflight_txs", inflight_txs.len() as f64, ); + vlog::trace!( + "Going through not confirmed txs. \ + Block numbers: latest {}, finalized {}, \ + operator's nonce: latest {}, finalized {}", + l1_block_numbers.latest, + l1_block_numbers.finalized, + operator_nonce.latest, + operator_nonce.finalized, + ); + // Not confirmed transactions, ordered by nonce for tx in inflight_txs { - vlog::trace!( - "Going through not confirmed txs. \ - Current block: {}, current tx id: {}, \ - sender's nonce on block `current block - number of confirmations`: {}", - current_block, - tx.id, - operator_nonce.lagging - ); + vlog::trace!("Checking tx id: {}", tx.id,); - // If the `current_sender_nonce` <= `tx.nonce`, this means + // If the `operator_nonce.latest` <= `tx.nonce`, this means // that `tx` is not mined and we should resend it. // We only resend the first unmined transaction. - if operator_nonce.current <= tx.nonce { + if operator_nonce.latest <= tx.nonce { // None means txs hasn't been sent yet let first_sent_at_block = storage .eth_sender_dal() .get_block_number_on_first_sent_attempt(tx.id) - .unwrap_or(current_block.0); + .await + .unwrap_or(l1_block_numbers.latest.0); return Ok(Some((tx, first_sent_at_block))); } - // If on block `current_block - self.wait_confirmations` - // sender's nonce was > tx.nonce, then `tx` is mined and confirmed (either successful or reverted). + // If on finalized block sender's nonce was > tx.nonce, + // then `tx` is mined and confirmed (either successful or reverted). // Only then we will check the history to find the receipt. // Otherwise, `tx` is mined but not confirmed, so we skip to the next one. - if operator_nonce.lagging <= tx.nonce { + if operator_nonce.finalized <= tx.nonce { continue; } vlog::trace!( - "Sender's nonce on block `current block - number of confirmations` is greater than current tx's nonce. \ + "Sender's nonce on finalized block is greater than current tx's nonce. \ Checking transaction with id {}.
Tx nonce is equal to {}",
                tx.id,
                tx.nonce,
            );
 
-            match self
-                .check_all_sending_attempts(storage, &tx, current_block)
-                .await
-            {
-                Some((tx_status, confirmations)) => {
-                    self.apply_tx_status(storage, &tx, tx_status, confirmations, current_block)
+            match self.check_all_sending_attempts(storage, &tx).await {
+                Some(tx_status) => {
+                    self.apply_tx_status(storage, &tx, tx_status, l1_block_numbers.finalized)
                         .await;
                 }
                 None => {
                     // The nonce has increased but we did not find the receipt.
                     // This is an error because such a big reorg may cause transactions that were
                     // previously recorded as confirmed to become pending again and we have to
-                    // make sure it's not the case - otherwire eth_sender may not work properly.
+                    // make sure it's not the case - otherwise eth_sender may not work properly.
                     vlog::error!(
-                        "Possible block reorgs: nonce increase detected {} blocks ago, but no tx receipt found for tx {:?}",
-                        self.config.wait_confirmations,
+                        "Possible block reorgs: finalized nonce increase detected, but no tx receipt found for tx {:?}",
                         &tx
                     );
                 }
@@ -358,7 +395,7 @@ where
                 opt.gas = Some(self.config.max_aggregated_tx_gas.into());
                 opt.max_fee_per_gas = Some(U256::from(base_fee_per_gas + priority_fee_per_gas));
                 opt.max_priority_fee_per_gas = Some(U256::from(priority_fee_per_gas));
-                opt.nonce = Some(tx.nonce.into());
+                opt.nonce = Some(tx.nonce.0.into());
             }),
             "eth_tx_manager",
         )
@@ -369,31 +406,36 @@ where
     async fn send_unsent_txs(
         &mut self,
         storage: &mut StorageProcessor<'_>,
-        current_block: L1BlockNumber,
+        l1_block_numbers: L1BlockNumbers,
     ) {
-        for tx in storage.eth_sender_dal().get_unsent_txs() {
+        for tx in storage.eth_sender_dal().get_unsent_txs().await {
             // Check already sent txs not marked as sent and mark them as sent.
             // The common reason for this behaviour is that we sent tx and stop the server
             // before updating the database
-            let tx_status = self
-                .get_tx_status_and_confirmations_count(tx.tx_hash, current_block)
-                .await;
+            let tx_status = self.get_tx_status(tx.tx_hash).await;
 
-            if let Ok(Some((tx_status, confirmations))) = tx_status {
+            if let Ok(Some(tx_status)) = tx_status {
                 vlog::info!("The tx {:?} has been already sent", tx.tx_hash);
                 storage
                     .eth_sender_dal()
-                    .set_sent_at_block(tx.id, tx_status.receipt.block_number.unwrap().as_u32());
+                    .set_sent_at_block(tx.id, tx_status.receipt.block_number.unwrap().as_u32())
+                    .await;
 
                 let eth_tx = storage
                     .eth_sender_dal()
                     .get_eth_tx(tx.eth_tx_id)
+                    .await
                     .expect("Eth tx should exist");
 
-                self.apply_tx_status(storage, &eth_tx, tx_status, confirmations, current_block)
+                self.apply_tx_status(storage, &eth_tx, tx_status, l1_block_numbers.finalized)
                     .await;
             } else if let Err(error) = self
-                .send_raw_transaction(storage, tx.id, tx.signed_raw_tx.clone(), current_block)
+                .send_raw_transaction(
+                    storage,
+                    tx.id,
+                    tx.signed_raw_tx.clone(),
+                    l1_block_numbers.latest,
+                )
                 .await
             {
                 vlog::warn!("Error {:?} in sending tx {:?}", error, &tx);
@@ -406,22 +448,20 @@ where
         storage: &mut StorageProcessor<'_>,
         tx: &EthTx,
         tx_status: ExecutedTxStatus,
-        confirmations: u64,
-        current_block: L1BlockNumber,
+        finalized_block: L1BlockNumber,
    ) {
-        if confirmations >= self.config.wait_confirmations {
+        let receipt_block_number = tx_status.receipt.block_number.unwrap().as_u32();
+        if receipt_block_number <= finalized_block.0 {
             if tx_status.success {
-                self.confirm_tx(storage, tx, tx_status, current_block);
+                self.confirm_tx(storage, tx, tx_status).await;
             } else {
                 self.fail_tx(storage, tx, tx_status).await;
             }
         } else {
             vlog::debug!(
-                "Transaction {} with id {}
has {} out of {} required confirmations", + "Transaction {} with id {} is not yet finalized: block in receipt {receipt_block_number}, finalized block {finalized_block}", tx_status.tx_hash, tx.id, - confirmations, - self.config.wait_confirmations ); } } @@ -432,7 +472,10 @@ where tx: &EthTx, tx_status: ExecutedTxStatus, ) { - storage.eth_sender_dal().mark_failed_transaction(tx.id); + storage + .eth_sender_dal() + .mark_failed_transaction(tx.id) + .await; let failure_reason = self .ethereum_gateway .failure_reason(tx_status.receipt.transaction_hash) @@ -450,12 +493,11 @@ where panic!("We can't operate after tx fail"); } - pub fn confirm_tx( + pub async fn confirm_tx( &self, storage: &mut StorageProcessor<'_>, tx: &EthTx, tx_status: ExecutedTxStatus, - current_block: L1BlockNumber, ) { let tx_hash = tx_status.receipt.transaction_hash; let gas_used = tx_status @@ -465,9 +507,10 @@ where storage .eth_sender_dal() - .confirm_tx(tx_status.tx_hash, gas_used); + .confirm_tx(tx_status.tx_hash, gas_used) + .await; - track_eth_tx_metrics(storage, "mined", tx); + track_eth_tx_metrics(storage, "mined", tx).await; if gas_used > U256::from(tx.predicted_gas_cost) { vlog::error!( @@ -499,32 +542,27 @@ where let sent_at_block = storage .eth_sender_dal() .get_block_number_on_first_sent_attempt(tx.id) + .await .unwrap_or(0); metrics::histogram!( "server.eth_sender.l1_blocks_waited_in_mempool", - (current_block.0 - sent_at_block - self.config.wait_confirmations as u32) as f64, + (tx_status.receipt.block_number.unwrap().as_u32() - sent_at_block) as f64, "type" => tx.tx_type.to_string() ); } pub async fn run(mut self, pool: ConnectionPool, stop_receiver: watch::Receiver) { { - let current_block = L1BlockNumber( - self.ethereum_gateway - .block_number("etx_tx_manager") - .await - .unwrap() - .as_u32(), - ); - let mut storage = pool.access_storage_blocking(); - self.send_unsent_txs(&mut storage, current_block).await; + let l1_block_numbers = self.get_l1_block_numbers().await.unwrap(); + let mut storage = pool.access_storage_tagged("eth_sender").await; + self.send_unsent_txs(&mut storage, l1_block_numbers).await; } // It's mandatory to set last_known_l1_block to zero, otherwise the first iteration // will never check inflight txs status let mut last_known_l1_block = L1BlockNumber(0); loop { - let mut storage = pool.access_storage_blocking(); + let mut storage = pool.access_storage_tagged("eth_sender").await; if *stop_receiver.borrow() { vlog::info!("Stop signal received, eth_tx_manager is shutting down"); @@ -549,7 +587,7 @@ where storage: &mut StorageProcessor<'_>, current_block: L1BlockNumber, ) { - let number_inflight_txs = storage.eth_sender_dal().get_inflight_txs().len(); + let number_inflight_txs = storage.eth_sender_dal().get_inflight_txs().await.len(); let number_of_available_slots_for_eth_txs = self .config .max_txs_in_flight @@ -559,7 +597,8 @@ where // Get the new eth tx and create history item for them let new_eth_tx = storage .eth_sender_dal() - .get_new_eth_txs(number_of_available_slots_for_eth_txs); + .get_new_eth_txs(number_of_available_slots_for_eth_txs) + .await; for tx in new_eth_tx { let _ = self.send_eth_tx(storage, &tx, 0, current_block).await; @@ -573,35 +612,31 @@ where storage: &mut StorageProcessor<'_>, previous_block: L1BlockNumber, ) -> Result { - let current_block = L1BlockNumber( - self.ethereum_gateway - .block_number("eth_tx_manager") - .await? 
- .as_u32(), - ); + let l1_block_numbers = self.get_l1_block_numbers().await?; - self.send_new_eth_txs(storage, current_block).await; + self.send_new_eth_txs(storage, l1_block_numbers.latest) + .await; - if current_block <= previous_block { + if l1_block_numbers.latest <= previous_block { // Nothing to do - no new blocks were mined. - return Ok(current_block); + return Ok(previous_block); } if let Some((tx, sent_at_block)) = self - .monitor_inflight_transactions(storage, current_block) + .monitor_inflight_transactions(storage, l1_block_numbers) .await? { // New gas price depends on the time this tx spent in mempool. - let time_in_mempool = current_block.0 - sent_at_block; + let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations. let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, current_block) + .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) .await; } - Ok(current_block) + Ok(l1_block_numbers.latest) } } diff --git a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs index d3fe029bf230..bd8250344971 100644 --- a/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs +++ b/core/bin/zksync_core/src/eth_sender/grafana_metrics.rs @@ -3,11 +3,18 @@ use zksync_dal::StorageProcessor; use zksync_types::eth_sender::EthTx; use zksync_utils::time::seconds_since_epoch; -pub fn track_eth_tx_metrics(connection: &mut StorageProcessor<'_>, l1_stage: &str, tx: &EthTx) { +pub async fn track_eth_tx_metrics( + connection: &mut StorageProcessor<'_>, + l1_stage: &str, + tx: &EthTx, +) { let start = Instant::now(); let stage = format!("l1_{}_{}", l1_stage, tx.tx_type.to_string()); - let blocks = connection.blocks_dal().get_blocks_for_eth_tx_id(tx.id); + let blocks = connection + .blocks_dal() + .get_blocks_for_eth_tx_id(tx.id) + .await; // This should be only the case when some blocks were reverted. if blocks.is_empty() { diff --git a/core/bin/zksync_core/src/eth_sender/tests.rs b/core/bin/zksync_core/src/eth_sender/tests.rs index 8cf04add1538..bbf7a0f6700b 100644 --- a/core/bin/zksync_core/src/eth_sender/tests.rs +++ b/core/bin/zksync_core/src/eth_sender/tests.rs @@ -1,18 +1,27 @@ -use crate::eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; -use crate::l1_gas_price::GasAdjuster; +use std::sync::{atomic::Ordering, Arc}; + use db_test_macro::db_test; use zksync_config::{ configs::eth_sender::{ProofSendingMode, SenderConfig}, ETHSenderConfig, GasAdjusterConfig, }; +use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::mock::MockEthereum, EthInterface}; use zksync_types::{ - aggregated_operations::{AggregatedOperation, BlocksExecuteOperation}, - Address, L1BlockNumber, + aggregated_operations::{ + AggregatedOperation, BlocksCommitOperation, BlocksExecuteOperation, BlocksProofOperation, + }, + block::L1BatchHeader, + commitment::{BlockMetaParameters, BlockMetadata, BlockWithMetadata}, + helpers::unix_timestamp_ms, + Address, L1BatchNumber, L1BlockNumber, H256, }; -use std::sync::Arc; +use crate::eth_sender::{ + eth_tx_manager::L1BlockNumbers, Aggregator, EthTxAggregator, EthTxManager, +}; +use crate::l1_gas_price::GasAdjuster; // Alias to conveniently call static methods of ETHSender. 
 type MockEthTxManager = EthTxManager<Arc<MockEthereum>, GasAdjuster<Arc<MockEthereum>>>;
 
@@ -31,16 +40,32 @@ struct EthSenderTester {
 impl EthSenderTester {
     const WAIT_CONFIRMATIONS: u64 = 10;
-    const MAX_BASE_FEE_SAMPLES: usize = 7;
+    const MAX_BASE_FEE_SAMPLES: usize = 3;
 
-    async fn new(connection_pool: ConnectionPool, history: Vec<u64>) -> Self {
+    async fn new(
+        connection_pool: ConnectionPool,
+        history: Vec<u64>,
+        non_ordering_confirmations: bool,
+    ) -> Self {
         let eth_sender_config = ETHSenderConfig::from_env();
         let aggregator_config = SenderConfig {
             aggregated_proof_sizes: vec![1],
             ..eth_sender_config.sender.clone()
         };
 
-        let gateway = Arc::new(MockEthereum::default().with_fee_history(history));
+        let gateway = Arc::new(
+            MockEthereum::default()
+                .with_fee_history(
+                    std::iter::repeat(0)
+                        .take(Self::WAIT_CONFIRMATIONS as usize)
+                        .chain(history)
+                        .collect(),
+                )
+                .with_non_ordering_confirmation(non_ordering_confirmations),
+        );
+        gateway
+            .block_number
+            .fetch_add(Self::WAIT_CONFIRMATIONS, Ordering::Relaxed);
 
         let gas_adjuster = Arc::new(
             GasAdjuster::new(
@@ -58,7 +83,6 @@ impl EthSenderTester {
 
         let aggregator = EthTxAggregator::new(
             SenderConfig {
-                wait_confirmations: Self::WAIT_CONFIRMATIONS,
                 proof_sending_mode: ProofSendingMode::SkipEveryProof,
                 ..eth_sender_config.sender.clone()
             },
@@ -70,10 +94,7 @@ impl EthSenderTester {
         );
 
         let manager = EthTxManager::new(
-            SenderConfig {
-                wait_confirmations: Self::WAIT_CONFIRMATIONS,
-                ..eth_sender_config.sender
-            },
+            eth_sender_config.sender,
             gas_adjuster.clone(),
             gateway.clone(),
         );
@@ -89,19 +110,26 @@ impl EthSenderTester {
     async fn storage(&self) -> StorageProcessor<'static> {
         self.conn.access_test_storage().await
     }
+
+    async fn get_block_numbers(&self) -> L1BlockNumbers {
+        let latest = self.gateway.block_number("").await.unwrap().as_u32().into();
+        let finalized = latest - Self::WAIT_CONFIRMATIONS as u32;
+        L1BlockNumbers { finalized, latest }
+    }
 }
 
 // Tests that we send multiple transactions and confirm them all in one iteration.
 #[db_test]
 async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> {
-    let mut tester = EthSenderTester::new(connection_pool.clone(), vec![10; 100]).await;
+    let mut tester = EthSenderTester::new(connection_pool, vec![10; 100], false).await;
 
     let mut hashes = vec![];
 
     for _ in 0..5 {
         let tx = tester
             .aggregator
-            .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION)?;
+            .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION)
+            .await?;
         let hash = tester
             .manager
             .send_eth_tx(
@@ -122,6 +150,7 @@ async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> {
             .await
             .eth_sender_dal()
             .get_inflight_txs()
+            .await
             .len(),
         5
     );
@@ -136,7 +165,7 @@ async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> {
         .manager
         .monitor_inflight_transactions(
             &mut tester.storage().await,
-            L1BlockNumber(tester.gateway.block_number("a").await.unwrap().as_u32()),
+            tester.get_block_numbers().await,
         )
         .await?;
 
@@ -147,6 +176,7 @@ async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> {
             .await
             .eth_sender_dal()
             .get_inflight_txs()
+            .await
            .len(),
        0
    );
@@ -160,7 +190,7 @@ async fn confirm_many(connection_pool: ConnectionPool) -> anyhow::Result<()> {

// Tests that we resend first unmined transaction every block with an increased gas price.
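// Aside (illustrative, not part of the patch): the resend path recomputes the fee
// pair and bumps the priority fee before re-signing. A minimal sketch of such a
// bump rule, assuming the common ~20% replacement-fee requirement of Ethereum
// nodes; the production `increase_priority_fee` instead derives its floor from
// the previously sent tx stored in `eth_sender_dal` and the gas adjuster's
// current estimate.
fn bumped_priority_fee(prev_priority_fee: u64, current_estimate: u64) -> u64 {
    // At least 20% over the fee of the tx being replaced, or the fresh network
    // estimate, whichever is higher.
    let min_replacement_fee = prev_priority_fee + prev_priority_fee / 5;
    min_replacement_fee.max(current_estimate)
}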
#[db_test] async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<()> { - let mut tester = EthSenderTester::new(connection_pool.clone(), vec![7, 6, 5, 4, 3, 2, 1]).await; + let mut tester = EthSenderTester::new(connection_pool, vec![7, 6, 5, 5, 5, 2, 1], false).await; // after this, median should be 6 tester.gateway.advance_block_number(3); @@ -169,7 +199,8 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() let block = L1BlockNumber(tester.gateway.block_number("").await?.as_u32()); let tx = tester .aggregator - .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION)?; + .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) + .await?; let hash = tester .manager @@ -184,6 +215,7 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() .await .eth_sender_dal() .get_inflight_txs() + .await .len(), 1 ); @@ -196,17 +228,22 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() // now, median is 5 tester.gateway.advance_block_number(2); tester.gas_adjuster.keep_updated().await?; - let block = L1BlockNumber(tester.gateway.block_number("").await?.as_u32()); + let block_numbers = tester.get_block_numbers().await; let (to_resend, _) = tester .manager - .monitor_inflight_transactions(&mut tester.storage().await, block) + .monitor_inflight_transactions(&mut tester.storage().await, block_numbers) .await? .unwrap(); let resent_hash = tester .manager - .send_eth_tx(&mut tester.storage().await, &to_resend, 1, block) + .send_eth_tx( + &mut tester.storage().await, + &to_resend, + 1, + block_numbers.latest, + ) .await?; // check that transaction has been resent @@ -217,6 +254,7 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() .await .eth_sender_dal() .get_inflight_txs() + .await .len(), 1 ); @@ -232,10 +270,11 @@ async fn resend_each_block(connection_pool: ConnectionPool) -> anyhow::Result<() // we won't mark it as confirmed but also won't resend it. 
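// Aside (illustrative, not part of the patch): `monitor_inflight_transactions`
// makes this call purely by comparing the tx nonce with the operator's nonce at
// the `latest` and `finalized` L1 blocks. The same decision, condensed; the enum
// is hypothetical and exists only for this illustration.
enum InflightTxState {
    NotMined,          // nonce not consumed at `latest`: candidate for resending
    MinedNotFinalized, // consumed at `latest` but not at `finalized`: just wait
    Finalized,         // consumed at `finalized`: fetch the receipt and apply it
}

fn classify(tx_nonce: u64, latest_nonce: u64, finalized_nonce: u64) -> InflightTxState {
    if latest_nonce <= tx_nonce {
        InflightTxState::NotMined
    } else if finalized_nonce <= tx_nonce {
        InflightTxState::MinedNotFinalized
    } else {
        InflightTxState::Finalized
    }
}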
#[db_test] async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::Result<()> { - let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100]).await; + let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false).await; let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) + .await .unwrap(); let hash = tester @@ -257,6 +296,7 @@ async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::R .await .eth_sender_dal() .get_inflight_txs() + .await .len(), 1 ); @@ -270,7 +310,7 @@ async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::R .manager .monitor_inflight_transactions( &mut tester.storage().await, - L1BlockNumber(tester.gateway.block_number("a").await.unwrap().as_u32()), + tester.get_block_numbers().await, ) .await?; @@ -281,6 +321,7 @@ async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::R .await .eth_sender_dal() .get_inflight_txs() + .await .len(), 1 ); @@ -293,13 +334,14 @@ async fn dont_resend_already_mined(connection_pool: ConnectionPool) -> anyhow::R #[db_test] async fn three_scenarios(connection_pool: ConnectionPool) -> anyhow::Result<()> { - let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100]).await; + let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100], false).await; let mut hashes = vec![]; for _ in 0..3 { let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) + .await .unwrap(); let hash = tester @@ -333,7 +375,7 @@ async fn three_scenarios(connection_pool: ConnectionPool) -> anyhow::Result<()> .manager .monitor_inflight_transactions( &mut tester.storage().await, - L1BlockNumber(tester.gateway.block_number("a").await.unwrap().as_u32()), + tester.get_block_numbers().await, ) .await? 
.expect("we should be trying to resend the last tx"); @@ -345,12 +387,13 @@ async fn three_scenarios(connection_pool: ConnectionPool) -> anyhow::Result<()> .await .eth_sender_dal() .get_inflight_txs() + .await .len(), 2 ); // last sent transaction has nonce == 2, because they start from 0 - assert_eq!(to_resend.nonce, 2); + assert_eq!(to_resend.nonce.0, 2); Ok(()) } @@ -358,11 +401,12 @@ async fn three_scenarios(connection_pool: ConnectionPool) -> anyhow::Result<()> #[should_panic(expected = "We can't operate after tx fail")] #[db_test] async fn failed_eth_tx(connection_pool: ConnectionPool) { - let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100]).await; + let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100], false).await; let tx = tester .aggregator .save_eth_tx(&mut tester.storage().await, &DUMMY_OPERATION) + .await .unwrap(); let hash = tester @@ -385,7 +429,288 @@ async fn failed_eth_tx(connection_pool: ConnectionPool) { .manager .monitor_inflight_transactions( &mut tester.storage().await, - L1BlockNumber(tester.gateway.block_number("a").await.unwrap().as_u32()), + tester.get_block_numbers().await, + ) + .await + .unwrap(); +} + +fn block_metadata(header: &L1BatchHeader) -> BlockWithMetadata { + BlockWithMetadata { + header: header.clone(), + metadata: BlockMetadata { + root_hash: Default::default(), + rollup_last_leaf_index: 0, + merkle_root_hash: Default::default(), + initial_writes_compressed: vec![], + repeated_writes_compressed: vec![], + commitment: Default::default(), + l2_l1_messages_compressed: vec![], + l2_l1_merkle_root: Default::default(), + block_meta_params: BlockMetaParameters { + zkporter_is_available: false, + bootloader_code_hash: Default::default(), + default_aa_code_hash: Default::default(), + }, + aux_data_hash: Default::default(), + meta_parameters_hash: Default::default(), + pass_through_data_hash: Default::default(), + }, + factory_deps: vec![], + } +} + +#[db_test] +async fn correct_order_for_confirmations(connection_pool: ConnectionPool) -> anyhow::Result<()> { + let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; + let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; + let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; + let second_block = insert_block(&mut tester, L1BatchNumber(2)).await; + commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + execute_blocks(&mut tester, vec![first_block.clone()], true).await; + commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, None) + .await; + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].header.number.0, 2); + + execute_blocks(&mut tester, vec![second_block.clone()], true).await; + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, None) + .await; + assert_eq!(blocks.len(), 0); + Ok(()) +} + +#[db_test] +async fn skipped_block_at_the_start(connection_pool: ConnectionPool) -> anyhow::Result<()> { + let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; + let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; + let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; + let second_block = 
insert_block(&mut tester, L1BatchNumber(2)).await; + commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + execute_blocks(&mut tester, vec![first_block.clone()], true).await; + commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + execute_blocks(&mut tester, vec![second_block.clone()], true).await; + + let third_block = insert_block(&mut tester, L1BatchNumber(3)).await; + let fourth_block = insert_block(&mut tester, L1BatchNumber(4)).await; + // DO NOT CONFIRM THIRD BLOCK + let third_block_commit_tx_hash = commit_block( + &mut tester, + second_block.clone(), + third_block.clone(), + false, + ) + .await; + + proof_block(&mut tester, second_block.clone(), third_block.clone(), true).await; + commit_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; + proof_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, Some(unix_timestamp_ms())) + .await; + assert_eq!(blocks.len(), 2); + + confirm_tx(&mut tester, third_block_commit_tx_hash).await; + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, Some(unix_timestamp_ms())) + .await; + assert_eq!(blocks.len(), 2); + Ok(()) +} + +#[db_test] +async fn skipped_block_in_the_middle(connection_pool: ConnectionPool) -> anyhow::Result<()> { + let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true).await; + let zero_block = insert_block(&mut tester, L1BatchNumber(0)).await; + let first_block = insert_block(&mut tester, L1BatchNumber(1)).await; + let second_block = insert_block(&mut tester, L1BatchNumber(2)).await; + commit_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + proof_block(&mut tester, zero_block.clone(), first_block.clone(), true).await; + execute_blocks(&mut tester, vec![first_block.clone()], true).await; + commit_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + proof_block(&mut tester, first_block.clone(), second_block.clone(), true).await; + + let third_block = insert_block(&mut tester, L1BatchNumber(3)).await; + let fourth_block = insert_block(&mut tester, L1BatchNumber(4)).await; + // DO NOT CONFIRM THIRD BLOCK + let third_block_commit_tx_hash = commit_block( + &mut tester, + second_block.clone(), + third_block.clone(), + false, + ) + .await; + + proof_block(&mut tester, second_block.clone(), third_block.clone(), true).await; + commit_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; + proof_block(&mut tester, third_block.clone(), fourth_block.clone(), true).await; + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, None) + .await; + // We should return all block including third block + assert_eq!(blocks.len(), 3); + assert_eq!(blocks[0].header.number.0, 2); + + confirm_tx(&mut tester, third_block_commit_tx_hash).await; + let blocks = tester + .storage() + .await + .blocks_dal() + .get_ready_for_execute_blocks(45, None) + .await; + assert_eq!(blocks.len(), 3); + Ok(()) +} + +async fn insert_block(tester: &mut EthSenderTester, number: L1BatchNumber) -> L1BatchHeader { + let mut block = L1BatchHeader::new( + number, + 0, + Address::zero(), + BaseSystemContractsHashes { + bootloader: Default::default(), + 
default_aa: Default::default(), + }, + ); + block.is_finished = true; + // save block to the database + tester + .storage() + .await + .blocks_dal() + .insert_l1_batch(&block, Default::default()) + .await; + tester + .storage() + .await + .blocks_dal() + .save_blocks_metadata( + block.number, + &BlockMetadata { + root_hash: Default::default(), + rollup_last_leaf_index: 0, + merkle_root_hash: Default::default(), + initial_writes_compressed: vec![], + repeated_writes_compressed: vec![], + commitment: Default::default(), + l2_l1_messages_compressed: vec![], + l2_l1_merkle_root: Default::default(), + block_meta_params: BlockMetaParameters { + zkporter_is_available: false, + bootloader_code_hash: Default::default(), + default_aa_code_hash: Default::default(), + }, + aux_data_hash: Default::default(), + meta_parameters_hash: Default::default(), + pass_through_data_hash: Default::default(), + }, + Default::default(), + ) + .await; + block +} + +async fn execute_blocks( + tester: &mut EthSenderTester, + blocks: Vec, + confirm: bool, +) -> H256 { + let operation = AggregatedOperation::ExecuteBlocks(BlocksExecuteOperation { + blocks: blocks.iter().map(block_metadata).collect(), + }); + send_operation(tester, operation, confirm).await +} + +async fn proof_block( + tester: &mut EthSenderTester, + last_committed_block: L1BatchHeader, + block: L1BatchHeader, + confirm: bool, +) -> H256 { + let operation = AggregatedOperation::PublishProofBlocksOnchain(BlocksProofOperation { + prev_block: block_metadata(&last_committed_block), + blocks: vec![block_metadata(&block)], + proofs: vec![], + should_verify: false, + }); + send_operation(tester, operation, confirm).await +} + +async fn commit_block( + tester: &mut EthSenderTester, + last_committed_block: L1BatchHeader, + block: L1BatchHeader, + confirm: bool, +) -> H256 { + let operation = AggregatedOperation::CommitBlocks(BlocksCommitOperation { + last_committed_block: block_metadata(&last_committed_block), + blocks: vec![block_metadata(&block)], + }); + send_operation(tester, operation, confirm).await +} + +async fn send_operation( + tester: &mut EthSenderTester, + aggregated_operation: AggregatedOperation, + confirm: bool, +) -> H256 { + let tx = tester + .aggregator + .save_eth_tx(&mut tester.storage().await, &aggregated_operation) + .await + .unwrap(); + + let hash = tester + .manager + .send_eth_tx( + &mut tester.storage().await, + &tx, + 0, + L1BlockNumber(tester.gateway.block_number("").await.unwrap().as_u32()), + ) + .await + .unwrap(); + + if confirm { + confirm_tx(tester, hash).await; + } + hash +} + +async fn confirm_tx(tester: &mut EthSenderTester, hash: H256) { + tester + .gateway + .execute_tx(hash, true, EthSenderTester::WAIT_CONFIRMATIONS) + .unwrap(); + + tester + .manager + .monitor_inflight_transactions( + &mut tester.storage().await, + tester.get_block_numbers().await, ) .await .unwrap(); diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs index a3144b0d4bb0..80fd3176ded3 100644 --- a/core/bin/zksync_core/src/eth_watch/client.rs +++ b/core/bin/zksync_core/src/eth_watch/client.rs @@ -4,9 +4,7 @@ use std::fmt::{Debug, Display}; use tokio::time::Instant; -use zksync_eth_client::{ - clients::http::PKSigningClient, types::Error as EthClientError, BoundEthInterface, EthInterface, -}; +use zksync_eth_client::{types::Error as EthClientError, EthInterface}; use zksync_types::ethabi::{Contract, Hash}; use zksync_contracts::zksync_contract; @@ -14,10 +12,9 @@ use zksync_types::{ l1::L1Tx, web3::{ 
            self,
-            contract::Options,
             types::{BlockNumber, FilterBuilder, Log},
         },
-    Address, Nonce, H160,
+    H160,
 };
 
 #[derive(Debug, thiserror::Error)]
@@ -54,9 +51,7 @@ pub trait EthClient {
         to: BlockNumber,
         retries_left: usize,
     ) -> Result<Vec<Log>, Error>;
-    async fn block_number(&self) -> Result<u64, Error>;
-    async fn get_auth_fact(&self, address: Address, nonce: Nonce) -> Result<Vec<u8>, Error>;
-    async fn get_auth_fact_reset_time(&self, address: Address, nonce: Nonce) -> Result<u64, Error>;
+    async fn finalized_block_number(&self) -> Result<u64, Error>;
 }
 
 pub const RETRY_LIMIT: usize = 5;
@@ -64,20 +59,26 @@
 const TOO_MANY_RESULTS_INFURA: &str = "query returned more than";
 const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded";
 
 #[derive(Debug)]
-pub struct EthHttpClient {
-    client: PKSigningClient,
+pub struct EthHttpQueryClient<E> {
+    client: E,
     topics: ContractTopics,
     zksync_contract_addr: H160,
+    confirmations_for_eth_event: Option<u64>,
 }
 
-impl EthHttpClient {
-    pub fn new(client: PKSigningClient, zksync_contract_addr: H160) -> Self {
+impl<E: EthInterface> EthHttpQueryClient<E> {
+    pub fn new(
+        client: E,
+        zksync_contract_addr: H160,
+        confirmations_for_eth_event: Option<u64>,
+    ) -> Self {
         vlog::debug!("New eth client, contract addr: {:x}", zksync_contract_addr);
         let topics = ContractTopics::new(&zksync_contract());
         Self {
             client,
             topics,
             zksync_contract_addr,
+            confirmations_for_eth_event,
         }
     }
 
@@ -108,7 +109,7 @@ impl EthHttpClient {
 }
 
 #[async_trait::async_trait]
-impl EthClient for EthHttpClient {
+impl<E: EthInterface + Send + Sync> EthClient for EthHttpQueryClient<E> {
     async fn get_priority_op_events(
         &self,
         from: BlockNumber,
@@ -199,33 +200,21 @@ impl EthClient for EthHttpClient {
         Ok(events)
     }
 
-    async fn block_number(&self) -> Result<u64, Error> {
-        Ok(self.client.block_number("watch").await?.as_u64())
-    }
-
-    async fn get_auth_fact(&self, address: Address, nonce: Nonce) -> Result<Vec<u8>, Error> {
-        Ok(self
-            .client
-            .call_main_contract_function(
-                "authFacts",
-                (address, u64::from(*nonce)),
-                None,
-                Options::default(),
-                None,
-            )
-            .await?)
-    }
-
-    async fn get_auth_fact_reset_time(&self, address: Address, nonce: Nonce) -> Result<u64, Error> {
-        Ok(self
-            .client
-            .call_main_contract_function::<u64, _, _, _>(
-                "authFactsResetTimer",
-                (address, u64::from(*nonce)),
-                None,
-                Options::default(),
-                None,
-            )
-            .await?)
+ async fn finalized_block_number(&self) -> Result { + if let Some(confirmations) = self.confirmations_for_eth_event { + let latest_block_number = self.client.block_number("watch").await?.as_u64(); + Ok(latest_block_number.saturating_sub(confirmations)) + } else { + self.client + .block("finalized".to_string(), "watch") + .await + .map_err(Into::into) + .map(|res| { + res.expect("Finalized block must be present on L1") + .number + .expect("Finalized block must contain number") + .as_u64() + }) + } } } diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index 58ef62c3ae70..eeb4a2c72226 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -12,18 +12,18 @@ use tokio::{sync::watch, task::JoinHandle}; // Workspace deps use zksync_config::constants::PRIORITY_EXPIRATION; -use zksync_eth_client::clients::http::PKSigningClient; use zksync_types::{ - l1::L1Tx, web3::types::BlockNumber as Web3BlockNumber, L1BlockNumber, PriorityOpId, + l1::L1Tx, web3::types::BlockNumber as Web3BlockNumber, L1BlockNumber, PriorityOpId, H160, }; // Local deps -use self::client::{Error, EthClient, EthHttpClient}; +use self::client::{Error, EthClient}; -use zksync_config::ZkSyncConfig; +use zksync_config::ETHWatchConfig; -use crate::eth_watch::client::RETRY_LIMIT; +use crate::eth_watch::client::{EthHttpQueryClient, RETRY_LIMIT}; use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_eth_client::EthInterface; mod client; @@ -39,57 +39,47 @@ struct EthWatchState { #[derive(Debug)] pub struct EthWatch { client: W, - /// All ethereum events are accepted after sufficient confirmations to eliminate risk of block reorg. - number_of_confirmations_for_event: usize, poll_interval: Duration, state: EthWatchState, } impl EthWatch { - pub async fn new( - client: W, - pool: &ConnectionPool, - number_of_confirmations_for_event: usize, - poll_interval: Duration, - ) -> Self { - let mut storage = pool.access_storage_blocking(); + pub async fn new(client: W, pool: &ConnectionPool, poll_interval: Duration) -> Self { + let mut storage = pool.access_storage_tagged("eth_watch").await; - let state = - Self::initialize_state(&client, &mut storage, number_of_confirmations_for_event).await; + let state = Self::initialize_state(&client, &mut storage).await; vlog::info!("initialized state: {:?}", state); Self { client, - number_of_confirmations_for_event, poll_interval, state, } } - async fn initialize_state( - client: &W, - storage: &mut StorageProcessor<'_>, - number_of_confirmations_for_event: usize, - ) -> EthWatchState { + async fn initialize_state(client: &W, storage: &mut StorageProcessor<'_>) -> EthWatchState { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() .last_priority_id() + .await .map_or(PriorityOpId(0), |e| e + 1); - let last_processed_ethereum_block = - match storage.transactions_dal().get_last_processed_l1_block() { - // There are some priority ops processed - start from the last processed eth block - // but subtract 1 in case the server stopped mid-block. - Some(block) => block.0.saturating_sub(1).into(), - // There are no priority ops processed - to be safe, scan the last 50k blocks. 
- None => { - Self::get_current_finalized_eth_block(client, number_of_confirmations_for_event) - .await - .expect("cannot initialize eth watch: cannot get current ETH block") - .saturating_sub(PRIORITY_EXPIRATION) - } - }; + let last_processed_ethereum_block = match storage + .transactions_dal() + .get_last_processed_l1_block() + .await + { + // There are some priority ops processed - start from the last processed eth block + // but subtract 1 in case the server stopped mid-block. + Some(block) => block.0.saturating_sub(1).into(), + // There are no priority ops processed - to be safe, scan the last 50k blocks. + None => client + .finalized_block_number() + .await + .expect("cannot initialize eth watch: cannot get current ETH block") + .saturating_sub(PRIORITY_EXPIRATION), + }; EthWatchState { next_expected_priority_id, @@ -109,17 +99,12 @@ impl EthWatch { metrics::counter!("server.eth_watch.eth_poll", 1); - let mut storage = pool.access_storage_blocking(); + let mut storage = pool.access_storage_tagged("eth_watch").await; if let Err(error) = self.loop_iteration(&mut storage).await { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. vlog::error!("Failed to process new blocks {}", error); - self.state = Self::initialize_state( - &self.client, - &mut storage, - self.number_of_confirmations_for_event, - ) - .await; + self.state = Self::initialize_state(&self.client, &mut storage).await; } } } @@ -127,11 +112,7 @@ impl EthWatch { #[tracing::instrument(skip(self, storage))] async fn loop_iteration(&mut self, storage: &mut StorageProcessor<'_>) -> Result<(), Error> { let mut stage_start = Instant::now(); - let to_block = Self::get_current_finalized_eth_block( - &self.client, - self.number_of_confirmations_for_event, - ) - .await?; + let to_block = self.client.finalized_block_number().await?; if to_block <= self.state.last_processed_ethereum_block { return Ok(()); @@ -167,7 +148,8 @@ impl EthWatch { for (eth_block, new_op) in new_ops { storage .transactions_dal() - .insert_transaction_l1(new_op, eth_block); + .insert_transaction_l1(new_op, eth_block) + .await; } metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist"); } @@ -213,34 +195,22 @@ impl EthWatch { .map(|tx| (L1BlockNumber(tx.eth_block() as u32), tx)) .collect()) } - - // ETH block assumed to be final (that is, old enough to not worry about reorgs) - async fn get_current_finalized_eth_block( - client: &W, - number_of_confirmations_for_event: usize, - ) -> Result { - Ok(client - .block_number() - .await? 
- .saturating_sub(number_of_confirmations_for_event as u64)) - } } -pub async fn start_eth_watch( +pub async fn start_eth_watch( pool: ConnectionPool, - eth_gateway: PKSigningClient, - config_options: &ZkSyncConfig, + eth_gateway: E, + diamond_proxy_addr: H160, stop_receiver: watch::Receiver, ) -> JoinHandle<()> { - let eth_client = EthHttpClient::new(eth_gateway, config_options.contracts.diamond_proxy_addr); - - let mut eth_watch = EthWatch::new( - eth_client, - &pool, - config_options.eth_watch.confirmations_for_eth_event as usize, - config_options.eth_watch.poll_interval(), - ) - .await; + let eth_watch = ETHWatchConfig::from_env(); + let eth_client = EthHttpQueryClient::new( + eth_gateway, + diamond_proxy_addr, + eth_watch.confirmations_for_eth_event, + ); + + let mut eth_watch = EthWatch::new(eth_client, &pool, eth_watch.poll_interval()).await; tokio::spawn(async move { eth_watch.run(pool, stop_receiver).await; diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs index 646b8684cb3a..fb5a8b43f444 100644 --- a/core/bin/zksync_core/src/eth_watch/tests.rs +++ b/core/bin/zksync_core/src/eth_watch/tests.rs @@ -1,4 +1,3 @@ -use std::cmp::max; use std::collections::HashMap; use std::convert::TryInto; use std::sync::Arc; @@ -10,7 +9,7 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::web3::types::{Address, BlockNumber}; use zksync_types::{ l1::{L1Tx, OpProcessingType, PriorityQueueType}, - Execute, L1TxCommonData, Nonce, PriorityOpId, Transaction, H256, U256, + Execute, L1TxCommonData, PriorityOpId, Transaction, H256, U256, }; use super::client::Error; @@ -18,29 +17,28 @@ use crate::eth_watch::{client::EthClient, EthWatch}; struct FakeEthClientData { transactions: HashMap>, - last_block_number: u64, + last_finalized_block_number: u64, } impl FakeEthClientData { fn new() -> Self { Self { transactions: Default::default(), - last_block_number: 0, + last_finalized_block_number: 0, } } fn add_transactions(&mut self, transactions: &[L1Tx]) { for transaction in transactions { let eth_block = transaction.eth_block(); - self.last_block_number = max(eth_block, self.last_block_number); self.transactions .entry(eth_block) .or_insert_with(Vec::new) .push(transaction.clone()); } } - fn set_last_block_number(&mut self, number: u64) { - self.last_block_number = number; + fn set_last_finalized_block_number(&mut self, number: u64) { + self.last_finalized_block_number = number; } } @@ -60,13 +58,16 @@ impl FakeEthClient { self.inner.write().await.add_transactions(transactions); } - async fn set_last_block_number(&mut self, number: u64) { - self.inner.write().await.set_last_block_number(number); + async fn set_last_finalized_block_number(&mut self, number: u64) { + self.inner + .write() + .await + .set_last_finalized_block_number(number); } async fn block_to_number(&self, block: BlockNumber) -> u64 { match block { - BlockNumber::Latest => self.inner.read().await.last_block_number, + BlockNumber::Latest => unreachable!(), BlockNumber::Earliest => 0, BlockNumber::Pending => unreachable!(), BlockNumber::Number(number) => number.as_u64(), @@ -93,20 +94,8 @@ impl EthClient for FakeEthClient { Ok(transactions) } - async fn block_number(&self) -> Result { - Ok(self.block_to_number(BlockNumber::Latest).await) - } - - async fn get_auth_fact(&self, _address: Address, _nonce: Nonce) -> Result, Error> { - unreachable!() - } - - async fn get_auth_fact_reset_time( - &self, - _address: Address, - _nonce: Nonce, - ) -> Result { - unreachable!() + async fn 
finalized_block_number(&self) -> Result<u64, Error> {
+        Ok(self.inner.read().await.last_finalized_block_number)
     }
 }
 
@@ -145,7 +134,6 @@ async fn test_normal_operation(connection_pool: ConnectionPool) {
     let mut watcher = EthWatch::new(
         client.clone(),
         &connection_pool,
-        5,
         std::time::Duration::from_nanos(1),
     )
     .await;
@@ -154,20 +142,20 @@
     client
         .add_transactions(&[build_tx(0, 10), build_tx(1, 14), build_tx(2, 18)])
         .await;
-    client.set_last_block_number(20).await;
-    // second tx will not be processed, as it has less than 5 confirmations
+    client.set_last_finalized_block_number(15).await;
+    // second tx will not be processed, as its block is not finalized yet.
     watcher.loop_iteration(&mut storage).await.unwrap();
-    let db_txs = get_all_db_txs(&mut storage);
+    let db_txs = get_all_db_txs(&mut storage).await;
     assert_eq!(db_txs.len(), 2);
     let db_tx: L1Tx = db_txs[0].clone().try_into().unwrap();
     assert_eq!(db_tx.common_data.serial_id.0, 0);
     let db_tx: L1Tx = db_txs[1].clone().try_into().unwrap();
     assert_eq!(db_tx.common_data.serial_id.0, 1);
 
-    client.set_last_block_number(25).await;
+    client.set_last_finalized_block_number(20).await;
     // now the second tx will be processed
     watcher.loop_iteration(&mut storage).await.unwrap();
-    let db_txs = get_all_db_txs(&mut storage);
+    let db_txs = get_all_db_txs(&mut storage).await;
     assert_eq!(db_txs.len(), 3);
     let db_tx: L1Tx = db_txs[2].clone().try_into().unwrap();
     assert_eq!(db_tx.common_data.serial_id.0, 2);
@@ -180,7 +168,6 @@ async fn test_gap_in_single_batch(connection_pool: ConnectionPool) {
     let mut watcher = EthWatch::new(
         client.clone(),
         &connection_pool,
-        5,
         std::time::Duration::from_nanos(1),
     )
     .await;
@@ -195,7 +182,7 @@
         build_tx(5, 14),
     ])
     .await;
-    client.set_last_block_number(20).await;
+    client.set_last_finalized_block_number(15).await;
     watcher.loop_iteration(&mut storage).await.unwrap();
 }
 
@@ -206,7 +193,6 @@ async fn test_gap_between_batches(connection_pool: ConnectionPool) {
     let mut watcher = EthWatch::new(
         client.clone(),
         &connection_pool,
-        5,
         std::time::Duration::from_nanos(1),
     )
     .await;
@@ -223,11 +209,11 @@
         build_tx(5, 22),
     ])
     .await;
-    client.set_last_block_number(20).await;
+    client.set_last_finalized_block_number(15).await;
     watcher.loop_iteration(&mut storage).await.unwrap();
-    let db_txs = get_all_db_txs(&mut storage);
+    let db_txs = get_all_db_txs(&mut storage).await;
     assert_eq!(db_txs.len(), 3);
-    client.set_last_block_number(30).await;
+    client.set_last_finalized_block_number(25).await;
     watcher.loop_iteration(&mut storage).await.unwrap();
 }
 
@@ -237,7 +223,6 @@ async fn test_overlapping_batches(connection_pool: ConnectionPool) {
     let mut watcher = EthWatch::new(
         client.clone(),
         &connection_pool,
-        5,
         std::time::Duration::from_nanos(1),
     )
     .await;
@@ -256,13 +241,13 @@
         build_tx(4, 23),
     ])
     .await;
-    client.set_last_block_number(20).await;
+    client.set_last_finalized_block_number(15).await;
    watcher.loop_iteration(&mut storage).await.unwrap();
-    let db_txs = get_all_db_txs(&mut storage);
+    let db_txs = get_all_db_txs(&mut storage).await;
    assert_eq!(db_txs.len(), 3);
-    client.set_last_block_number(30).await;
+    client.set_last_finalized_block_number(25).await;
    watcher.loop_iteration(&mut storage).await.unwrap();
-    let db_txs = get_all_db_txs(&mut storage);
+    let db_txs =
get_all_db_txs(&mut storage).await; assert_eq!(db_txs.len(), 5); let tx: L1Tx = db_txs[2].clone().try_into().unwrap(); assert_eq!(tx.common_data.serial_id.0, 2); @@ -270,10 +255,11 @@ async fn test_overlapping_batches(connection_pool: ConnectionPool) { assert_eq!(tx.common_data.serial_id.0, 4); } -fn get_all_db_txs(storage: &mut StorageProcessor<'_>) -> Vec { - storage.transactions_dal().reset_mempool(); +async fn get_all_db_txs(storage: &mut StorageProcessor<'_>) -> Vec { + storage.transactions_dal().reset_mempool().await; storage .transactions_dal() .sync_mempool(vec![], vec![], 0, 0, 1000) + .await .0 } diff --git a/core/bin/zksync_core/src/fee_monitor/mod.rs b/core/bin/zksync_core/src/fee_monitor/mod.rs deleted file mode 100644 index 9d6e88e6bd8d..000000000000 --- a/core/bin/zksync_core/src/fee_monitor/mod.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! This module contains utilities for monitoring the fee model performance, -//! i.e. ability of the protocol to cover the costs for its own maintenance. - -use std::time::Duration; -use zksync_config::ZkSyncConfig; -use zksync_dal::ConnectionPool; -use zksync_eth_client::{clients::http::PKSigningClient, EthInterface}; -use zksync_types::{ - api::BlockId, AccountTreeId, Address, L1BatchNumber, L2_ETH_TOKEN_ADDRESS, U256, -}; - -/// Component name used to track eth client usage. -const COMPONENT_NAME: &str = "fee-monitor"; - -/// Inclusive iterator for the (from..=to) blocks range -fn block_range(from: L1BatchNumber, to: L1BatchNumber) -> impl Iterator { - (from.0..=to.0).map(L1BatchNumber) -} - -/// Helper trait allowing to convert U256 balance representation to the f64 -/// with the given amount of decimals. -/// -/// Important! Never attempt to use this trait for anything important, because -/// the conversion is, obviously, lossy. -trait BalanceConvert { - fn to_f64_with_decimals(self, decimals: u8) -> f64; -} - -impl BalanceConvert for U256 { - fn to_f64_with_decimals(self, decimals: u8) -> f64 { - let divider = U256::from(10u64.pow(decimals as u32)); - let (quotient, remainder) = self.div_mod(divider); - let remainder_fractional = (remainder.as_u128() as f64) * 10.0f64.powf(-(decimals as f64)); - - quotient.as_u128() as f64 + remainder_fractional - } -} - -#[derive(Debug)] -pub struct FeeMonitor { - operator_address: Address, - fee_account_address: Address, - - storage: ConnectionPool, - client: PKSigningClient, - - next_finalized_block: L1BatchNumber, -} - -impl FeeMonitor { - pub fn new(config: &ZkSyncConfig, storage: ConnectionPool, client: PKSigningClient) -> Self { - let mut storage_processor = storage.access_storage_blocking(); - let latest_l1_batch_finalized = storage_processor - .blocks_dal() - .get_number_of_last_block_executed_on_eth() - .unwrap_or_default(); - drop(storage_processor); - - Self { - operator_address: config.eth_sender.sender.operator_commit_eth_addr, - fee_account_address: config.chain.state_keeper.fee_account_addr, - - storage, - client, - - next_finalized_block: latest_l1_batch_finalized.next(), - } - } - - pub async fn run(mut self) { - // We don't need these metrics to be reported often. 
- let mut timer = tokio::time::interval(Duration::from_secs(15)); - - loop { - timer.tick().await; - self.run_iter().await; - } - } - - async fn run_iter(&mut self) { - let last_finalized = { - let mut storage = self.storage.access_storage_blocking(); - storage - .blocks_dal() - .get_number_of_last_block_executed_on_eth() - .unwrap_or_default() - }; - - let _ = self.report_balances().await.map_err(|err| { - vlog::warn!("Unable to report account balances in fee monitor: {err}"); - }); - - // Only report data if new blocks were finalized. - if last_finalized >= self.next_finalized_block { - let _ = self.report_collected_fees(last_finalized).map_err(|err| { - vlog::warn!("Unable to report collected fees in fee monitor: {err}"); - }); - let _ = self - .report_l1_batch_finalized(last_finalized) - .map_err(|err| { - vlog::warn!("Unable to report l1 batch finalization in fee monitor: {err}"); - }); - - self.next_finalized_block = last_finalized.next(); - } - } - - async fn report_balances(&self) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage_blocking(); - let mut operator_balance_l1 = self - .client - .eth_balance(self.operator_address, COMPONENT_NAME) - .await? - .to_f64_with_decimals(18); - let mut fee_account_balance_l1 = self - .client - .eth_balance(self.fee_account_address, COMPONENT_NAME) - .await? - .to_f64_with_decimals(18); - let mut fee_account_balance_l2 = storage - .storage_web3_dal() - .standard_token_historical_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - AccountTreeId::new(self.fee_account_address), - BlockId::Number(zksync_types::api::BlockNumber::Pending), - )?? - .to_f64_with_decimals(18); - - // Limit balances to sane values to render them adequatily on the localhost. - for balance in [ - &mut operator_balance_l1, - &mut fee_account_balance_l1, - &mut fee_account_balance_l2, - ] { - // We're unlikely to keep more than 1000 ETH on hot wallets in any real environment. - const MAX_BALANCE_TO_DISPLAY_ETH: f64 = 1000.0f64; - *balance = balance.min(MAX_BALANCE_TO_DISPLAY_ETH); - } - - metrics::gauge!("fee_monitor.balances", operator_balance_l1, "account" => "operator_l1"); - metrics::gauge!("fee_monitor.balances", fee_account_balance_l1, "account" => "fee_account_l1"); - metrics::gauge!("fee_monitor.balances", fee_account_balance_l2, "account" => "fee_account_l2"); - - Ok(()) - } - - fn report_collected_fees(&mut self, last_finalized: L1BatchNumber) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage_blocking(); - for block_number in block_range(self.next_finalized_block, last_finalized) { - let collected_fees = storage - .fee_monitor_dal() - .fetch_erc20_transfers(block_number, self.fee_account_address)?; - - let total_fee_wei: U256 = collected_fees - .into_iter() - .fold(U256::zero(), |acc, x| acc + x); - - // Convert value to gwei to reduce verbosity. - let fee_in_gwei = total_fee_wei.to_f64_with_decimals(9); - metrics::gauge!("fee_monitor.collected_fees", fee_in_gwei); - vlog::info!("Collected fees in block {block_number}: {fee_in_gwei:.6} gwei"); - } - - Ok(()) - } - - fn report_l1_batch_finalized(&mut self, last_finalized: L1BatchNumber) -> anyhow::Result<()> { - let mut storage = self.storage.access_storage_blocking(); - for block_number in block_range(self.next_finalized_block, last_finalized) { - let block_data = storage - .fee_monitor_dal() - .get_block_gas_consumption(block_number)?; - let total_wei_spent = U256::from(block_data.wei_spent()); - - // Convert value to gwei to reduce verbosity. 
- let gwei_spent = total_wei_spent.to_f64_with_decimals(9); - metrics::gauge!("fee_monitor.expenses", gwei_spent); - vlog::info!("Block processing expenses in block {block_number}: {gwei_spent:.6} gwei"); - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn check(val: u64, expected: f64, decimals: u8) { - // That's a bad way to compare floats, mmkay? - // However we aren't going for precision anyways, so don't tell anyone. ( ͡° ͜ʖ ͡°) - let result = U256::from(val).to_f64_with_decimals(decimals); - let abs_diff = (result - expected).abs(); - assert!( - abs_diff < 0.000001f64, - "Value mismatch: expected {}, got {}", - expected, - result - ); - } - - #[test] - fn to_f64_with_decimals() { - check(1000000, 1.0, 6); - check(1000001, 1.000001, 6); - check(1800001, 1.800001, 6); - check(3241500000000000000, 3.2415, 18); - } -} diff --git a/core/bin/zksync_core/src/fee_ticker/mod.rs b/core/bin/zksync_core/src/fee_ticker/mod.rs index ebf9a2ceac68..35facd0e4b35 100644 --- a/core/bin/zksync_core/src/fee_ticker/mod.rs +++ b/core/bin/zksync_core/src/fee_ticker/mod.rs @@ -34,16 +34,16 @@ pub struct FeeTicker; impl FeeTicker { /// Returns the token price in USD. - pub fn get_l2_token_price( + pub async fn get_l2_token_price( tokens_web3_dal: &mut TokensWeb3Dal<'_, '_>, request_type: TokenPriceRequestType, l2_token_addr: &Address, ) -> Result { - Self::get_l2_token_price_inner(tokens_web3_dal, request_type, l2_token_addr).map( - |final_price| { + Self::get_l2_token_price_inner(tokens_web3_dal, request_type, l2_token_addr) + .await + .map(|final_price| { ratio_to_big_decimal_normalized(&final_price, USD_PRECISION, MIN_PRECISION) - }, - ) + }) } /// Returns the acceptable `gas_per_pubdata_byte` based on the current gas price. @@ -51,13 +51,14 @@ impl FeeTicker { base_fee_to_gas_per_pubdata(gas_price_wei, base_fee) as u32 } - fn get_l2_token_price_inner( + async fn get_l2_token_price_inner( tokens_web3_dal: &mut TokensWeb3Dal<'_, '_>, request_type: TokenPriceRequestType, l2_token_addr: &Address, ) -> Result, TickerError> { let token_price = tokens_web3_dal .get_token_price(l2_token_addr) + .await .map_err(|_| TickerError::InternalError)? .ok_or(TickerError::PriceNotTracked(*l2_token_addr))? .usd_price; @@ -67,6 +68,7 @@ impl FeeTicker { TokenPriceRequestType::USDForOneWei => { let token_metadata = tokens_web3_dal .get_token_metadata(l2_token_addr) + .await .map_err(|_| TickerError::InternalError)? .ok_or_else(|| { // It's kinda not OK that we have a price for token, but no metadata. diff --git a/core/bin/zksync_core/src/genesis.rs b/core/bin/zksync_core/src/genesis.rs index caff289fc53f..e6f65cbf77df 100644 --- a/core/bin/zksync_core/src/genesis.rs +++ b/core/bin/zksync_core/src/genesis.rs @@ -2,15 +2,10 @@ //! It initializes the Merkle tree with the basic setup (such as fields of special service accounts), //! setups the required databases, and outputs the data required to initialize a smart contract. 
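// Aside (illustrative, not part of the patch): `ensure_genesis_state` below is
// written to be idempotent — if a genesis root is already stored it is returned
// instead of being recomputed. The same guard pattern in miniature, over a
// hypothetical in-memory key-value store.
use std::collections::HashMap;

fn ensure_genesis(
    store: &mut HashMap<&'static str, [u8; 32]>,
    compute_root: impl FnOnce() -> [u8; 32],
) -> [u8; 32] {
    // Early-return the stored root if genesis already ran; otherwise compute
    // the root once and persist it before returning.
    if let Some(root) = store.get("genesis_root") {
        return *root;
    }
    let root = compute_root();
    store.insert("genesis_root", root);
    root
}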
-use crate::sync_layer::genesis::fetch_base_system_contracts; -use tempfile::TempDir; use vm::zk_evm::aux_structures::{LogQuery, Timestamp}; - use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_dal::StorageProcessor; -use zksync_merkle_tree::ZkSyncTree; -use zksync_storage::db::Database; -use zksync_storage::RocksDB; +use zksync_merkle_tree::domain::ZkSyncTree; use zksync_types::{ block::DeployedContract, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, @@ -24,6 +19,10 @@ use zksync_types::{ use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, miniblock_hash}; use zksync_web3_decl::{jsonrpsee::http_client::HttpClientBuilder, namespaces::ZksNamespaceClient}; +use crate::{ + metadata_calculator::get_logs_for_l1_batch, sync_layer::genesis::fetch_base_system_contracts, +}; + #[derive(Debug, Clone)] pub enum GenesisParams { MainNode { @@ -38,20 +37,17 @@ pub enum GenesisParams { pub async fn ensure_genesis_state( storage: &mut StorageProcessor<'_>, zksync_chain_id: L2ChainId, - genesis_params: GenesisParams, + genesis_params: &GenesisParams, ) -> H256 { - let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let db = RocksDB::new(Database::MerkleTree, temp_dir.as_ref(), false); - let mut tree = ZkSyncTree::new(db); - - let mut transaction = storage.start_transaction_blocking(); + let mut transaction = storage.start_transaction().await; // return if genesis block was already processed - if !transaction.blocks_dal().is_genesis_needed() { + if !transaction.blocks_dal().is_genesis_needed().await { vlog::debug!("genesis is not needed!"); return transaction .blocks_dal() .get_block_state_root(L1BatchNumber(0)) + .await .expect("genesis block hash is empty"); } @@ -71,7 +67,7 @@ pub async fn ensure_genesis_state( // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. 
let contracts = - fetch_base_system_contracts(&main_node_url, base_system_contracts_hashes) + fetch_base_system_contracts(main_node_url, *base_system_contracts_hashes) .await .expect("Failed to fetch base system contracts from main node"); @@ -87,7 +83,7 @@ pub async fn ensure_genesis_state( (contracts, first_validator) } GenesisParams::MainNode { first_validator } => { - (BaseSystemContracts::load_from_disk(), first_validator) + (BaseSystemContracts::load_from_disk(), *first_validator) } }; @@ -98,14 +94,15 @@ pub async fn ensure_genesis_state( first_validator_address, zksync_chain_id, base_system_contracts, - ); + ) + .await; vlog::info!("chain_schema_genesis is complete"); - let storage_logs = - crate::metadata_calculator::get_logs_for_l1_batch(&mut transaction, L1BatchNumber(0)); - let metadata = tree.process_block(storage_logs.unwrap().storage_logs); + let storage_logs = get_logs_for_l1_batch(&mut transaction, L1BatchNumber(0)).await; + let storage_logs = storage_logs.unwrap().storage_logs; + let metadata = ZkSyncTree::process_genesis_batch(&storage_logs); let genesis_root_hash = metadata.root_hash; - let rollup_last_leaf_index = metadata.rollup_last_leaf_index; + let rollup_last_leaf_index = metadata.leaf_count + 1; let block_commitment = BlockCommitment::new( vec![], @@ -122,10 +119,11 @@ pub async fn ensure_genesis_state( &block_commitment, genesis_root_hash, rollup_last_leaf_index, - ); + ) + .await; vlog::info!("operations_schema_genesis is complete"); - transaction.commit_blocking(); + transaction.commit().await; // We need to `println` this value because it will be used to initialize the smart contract. println!("CONTRACTS_GENESIS_ROOT={:?}", genesis_root_hash); @@ -155,21 +153,22 @@ pub async fn ensure_genesis_state( // Both are rather parameters of a block and not system contracts. // The code of the bootloader should not be deployed anywhere anywhere in the kernel space (i.e. addresses below 2^16) // because in this case we will have to worry about protecting it. 
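// Aside (illustrative, not part of the patch): rather than deploying the
// bootloader and default account to addresses, genesis persists their bytecodes
// as factory dependencies keyed by code hash, which is what
// `insert_base_system_contracts_to_factory_deps` below assembles. The mapping in
// miniature, with `[u8; 32]` standing in for the real `H256` code-hash type.
use std::collections::HashMap;

fn factory_deps(contracts: &[([u8; 32], Vec<u8>)]) -> HashMap<[u8; 32], Vec<u8>> {
    // code hash -> raw bytecode; the VM later resolves system code through this map.
    contracts.iter().cloned().collect()
}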
-fn insert_base_system_contracts_to_factory_deps( +async fn insert_base_system_contracts_to_factory_deps( storage: &mut StorageProcessor<'_>, contracts: BaseSystemContracts, ) { - let factory_deps = vec![contracts.bootloader, contracts.default_aa] + let factory_deps = [contracts.bootloader, contracts.default_aa] .iter() .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); storage .storage_dal() - .insert_factory_deps(MiniblockNumber(0), factory_deps); + .insert_factory_deps(MiniblockNumber(0), &factory_deps) + .await; } -fn insert_system_contracts( +async fn insert_system_contracts( storage: &mut StorageProcessor<'_>, contracts: Vec<DeployedContract>, chain_id: L2ChainId, @@ -191,11 +190,12 @@ fn insert_system_contracts( .chain(Some(system_context_init_logs)) .collect(); - let mut transaction = storage.start_transaction_blocking(); + let mut transaction = storage.start_transaction().await; transaction .storage_logs_dal() - .insert_storage_logs(MiniblockNumber(0), &storage_logs); + .insert_storage_logs(MiniblockNumber(0), &storage_logs) + .await; // we don't produce proof for the genesis block, // but we still need to populate the table @@ -234,12 +234,17 @@ fn insert_system_contracts( .partition(|log_query| log_query.rw_flag); transaction .storage_logs_dedup_dal() - .insert_protective_reads(L1BatchNumber(0), &protective_reads); + .insert_protective_reads(L1BatchNumber(0), &protective_reads) + .await; transaction .storage_logs_dedup_dal() - .insert_initial_writes(L1BatchNumber(0), &deduplicated_writes); + .insert_initial_writes(L1BatchNumber(0), &deduplicated_writes) + .await; - transaction.storage_dal().apply_storage_logs(&storage_logs); + transaction + .storage_dal() + .apply_storage_logs(&storage_logs) + .await; let factory_deps = contracts .into_iter() .collect(); transaction .storage_dal() - .insert_factory_deps(MiniblockNumber(0), factory_deps); + .insert_factory_deps(MiniblockNumber(0), &factory_deps) + .await; - transaction.commit_blocking(); + transaction.commit().await; } -pub(crate) fn create_genesis_block( +pub(crate) async fn create_genesis_block( storage: &mut StorageProcessor<'_>, first_validator_address: Address, chain_id: L2ChainId, @@ -278,29 +284,32 @@ pub(crate) fn create_genesis_block( base_system_contracts_hashes: base_system_contracts.hashes(), }; - let mut transaction = storage.start_transaction_blocking(); + let mut transaction = storage.start_transaction().await; transaction .blocks_dal() - .insert_l1_batch(zero_block_header, BlockGasCount::default()); + .insert_l1_batch(&zero_block_header, BlockGasCount::default()) + .await; transaction .blocks_dal() - .insert_miniblock(zero_miniblock_header); + .insert_miniblock(&zero_miniblock_header) + .await; transaction .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(0)); + .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(0)) + .await; - insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts); + insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts).await; let contracts = get_system_smart_contracts(); - insert_system_contracts(&mut transaction, contracts, chain_id); + insert_system_contracts(&mut transaction, contracts, chain_id).await; - add_eth_token(&mut transaction); + add_eth_token(&mut transaction).await; - transaction.commit_blocking(); + transaction.commit().await; } -pub(crate) fn add_eth_token(storage: &mut StorageProcessor<'_>) { +pub(crate) async fn add_eth_token(storage: &mut
StorageProcessor<'_>) { let eth_token = TokenInfo { l1_address: ETHEREUM_ADDRESS, l2_address: ETHEREUM_ADDRESS, @@ -311,17 +320,21 @@ pub(crate) fn add_eth_token(storage: &mut StorageProcessor<'_>) { }, }; - let mut transaction = storage.start_transaction_blocking(); + let mut transaction = storage.start_transaction().await; - transaction.tokens_dal().add_tokens(vec![eth_token.clone()]); transaction .tokens_dal() - .update_well_known_l1_token(&ETHEREUM_ADDRESS, eth_token.metadata); + .add_tokens(vec![eth_token.clone()]) + .await; + transaction + .tokens_dal() + .update_well_known_l1_token(&ETHEREUM_ADDRESS, eth_token.metadata) + .await; - transaction.commit_blocking(); + transaction.commit().await; } -pub(crate) fn save_genesis_block_metadata( +pub(crate) async fn save_genesis_block_metadata( storage: &mut StorageProcessor<'_>, block_commitment: &BlockCommitment, genesis_root_hash: H256, @@ -345,5 +358,33 @@ pub(crate) fn save_genesis_block_metadata( }; storage .blocks_dal() - .save_block_metadata(L1BatchNumber(0), metadata); + .save_block_metadata(L1BatchNumber(0), &metadata) + .await; +} + +#[cfg(test)] +mod tests { + use db_test_macro::db_test; + use zksync_dal::ConnectionPool; + + use super::*; + + #[db_test] + async fn running_genesis(pool: ConnectionPool) { + let mut conn = pool.access_storage().await; + conn.blocks_dal().delete_genesis().await; + + let params = GenesisParams::MainNode { + first_validator: Address::random(), + }; + ensure_genesis_state(&mut conn, L2ChainId(270), &params).await; + + assert!(!conn.blocks_dal().is_genesis_needed().await); + let metadata = conn.blocks_dal().get_block_metadata(L1BatchNumber(0)).await; + let root_hash = metadata.unwrap().metadata.root_hash; + assert_ne!(root_hash, H256::zero()); + + // Check that `ensure_genesis_state()` doesn't panic on repeated runs.
+ ensure_genesis_state(&mut conn, L2ChainId(270), &params).await; + } } diff --git a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs index 0614f66741c1..2a64a4bc10d8 100644 --- a/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/bin/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -1,38 +1,46 @@ +use async_trait::async_trait; + use zksync_dal::ConnectionPool; +use zksync_utils::time::seconds_since_epoch; use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct L1BatchMetricsReporter { reporting_interval_ms: u64, + connection_pool: ConnectionPool, } impl L1BatchMetricsReporter { - pub fn new(reporting_interval_ms: u64) -> Self { + pub fn new(reporting_interval_ms: u64, connection_pool: ConnectionPool) -> Self { Self { reporting_interval_ms, + connection_pool, } } - fn report_metrics(&self, connection_pool: ConnectionPool) { - let mut conn = connection_pool.access_storage_blocking(); + async fn report_metrics(&self) { + let mut conn = self.connection_pool.access_storage().await; let mut block_metrics = vec![ ( - conn.blocks_dal().get_sealed_block_number(), + conn.blocks_dal().get_sealed_block_number().await, "sealed".to_string(), ), ( - conn.blocks_dal().get_last_block_number_with_metadata(), + conn.blocks_dal() + .get_last_block_number_with_metadata() + .await, "metadata_calculated".to_string(), ), ( conn.blocks_dal() - .get_last_l1_batch_number_with_witness_inputs(), + .get_last_l1_batch_number_with_witness_inputs() + .await, "merkle_proof_calculated".to_string(), ), ]; - let eth_stats = conn.eth_sender_dal().get_eth_l1_batches(); + let eth_stats = conn.eth_sender_dal().get_eth_l1_batches().await; for (tx_type, l1_batch) in eth_stats.saved { block_metrics.push((l1_batch, format!("l1_saved_{:?}", tx_type))) } @@ -41,24 +49,6 @@ impl L1BatchMetricsReporter { block_metrics.push((l1_batch, format!("l1_mined_{:?}", tx_type))) } - block_metrics.append( - &mut conn - .prover_dal() - .get_proven_l1_batches() - .into_iter() - .map(|(l1_batch_number, stage)| (l1_batch_number, format!("prove_{:?}", stage))) - .collect(), - ); - - block_metrics.append( - &mut conn - .witness_generator_dal() - .get_witness_generated_l1_batches() - .into_iter() - .map(|(l1_batch_number, stage)| (l1_batch_number, format!("wit_gen_{:?}", stage))) - .collect(), - ); - for (l1_batch_number, stage) in block_metrics { metrics::gauge!( "server.block_number", @@ -66,14 +56,48 @@ impl L1BatchMetricsReporter { "stage" => stage ); } + + let oldest_uncommitted_batch_timestamp = + conn.blocks_dal().oldest_uncommitted_batch_timestamp().await; + let oldest_unproved_batch_timestamp = + conn.blocks_dal().oldest_unproved_batch_timestamp().await; + let oldest_unexecuted_batch_timestamp = + conn.blocks_dal().oldest_unexecuted_batch_timestamp().await; + + let now = seconds_since_epoch(); + + if let Some(timestamp) = oldest_uncommitted_batch_timestamp { + metrics::gauge!( + "server.blocks_state.block_eth_stage_latency", + now.saturating_sub(timestamp) as f64, + "stage" => "uncommitted_block" + ); + } + + if let Some(timestamp) = oldest_unproved_batch_timestamp { + metrics::gauge!( + "server.blocks_state.block_eth_stage_latency", + now.saturating_sub(timestamp) as f64, + "stage" => "unproved_block" + ); + } + + if let Some(timestamp) = oldest_unexecuted_batch_timestamp { + metrics::gauge!( + "server.blocks_state.block_eth_stage_latency", + now.saturating_sub(timestamp) as f64, + "stage" => "unexecuted_block" + );
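// (A worked example for the latency gauges above, illustrative rather than part of
// the patch: if the oldest unexecuted batch carries Unix timestamp 1_689_500_000 and
// `now` is 1_689_500_360, the exported latency is 360 seconds. `saturating_sub`
// clamps the difference to 0 instead of underflowing the `u64` if clock skew ever
// makes `timestamp` exceed `now`.)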
+ } } } +#[async_trait] impl PeriodicJob for L1BatchMetricsReporter { const SERVICE_NAME: &'static str = "L1BatchMetricsReporter"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - self.report_metrics(connection_pool); + async fn run_routine_task(&mut self) { + self.report_metrics().await; } fn polling_interval_ms(&self) -> u64 { diff --git a/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs new file mode 100644 index 000000000000..2b2dc4ce7d54 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs @@ -0,0 +1,54 @@ +use std::time::Duration; + +use crate::house_keeper::periodic_job::PeriodicJob; +use async_trait::async_trait; +use zksync_dal::ConnectionPool; + +#[derive(Debug)] +pub struct FriProverJobRetryManager { + pool: ConnectionPool, + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, +} + +impl FriProverJobRetryManager { + pub fn new( + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, + pool: ConnectionPool, + ) -> Self { + Self { + max_attempts, + processing_timeout, + retry_interval_ms, + pool, + } + } +} + +/// Invoked periodically to re-queue stuck fri prover jobs. +#[async_trait] +impl PeriodicJob for FriProverJobRetryManager { + const SERVICE_NAME: &'static str = "FriProverJobRetryManager"; + + async fn run_routine_task(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_prover_jobs_dal() + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing fri prover job {:?}", stuck_job); + } + metrics::counter!("server.prover_fri.requeued_jobs", job_len as u64); + } + + fn polling_interval_ms(&self) -> u64 { + self.retry_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs new file mode 100644 index 000000000000..2387f0ffc850 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -0,0 +1,63 @@ +use crate::house_keeper::periodic_job::PeriodicJob; +use async_trait::async_trait; +use zksync_dal::ConnectionPool; + +#[derive(Debug)] +pub struct FriProverStatsReporter { + reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, +} + +impl FriProverStatsReporter { + pub fn new(reporting_interval_ms: u64, prover_connection_pool: ConnectionPool) -> Self { + Self { + reporting_interval_ms, + prover_connection_pool, + } + } +} + +/// Invoked periodically to push prover queued/inprogress job statistics +#[async_trait] +impl PeriodicJob for FriProverStatsReporter { + const SERVICE_NAME: &'static str = "FriProverStatsReporter"; + + async fn run_routine_task(&mut self) { + let mut conn = self.prover_connection_pool.access_storage().await; + let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; + + for ((circuit_id, aggregation_round), stats) in stats.into_iter() { + metrics::gauge!( + "fri_prover.prover.jobs", + stats.queued as f64, + "type" => "queued", + "circuit_id" => circuit_id.to_string(), + "aggregation_round" => aggregation_round.to_string() + ); + + metrics::gauge!( + "fri_prover.prover.jobs", + stats.in_progress as f64, + "type" => "in_progress", + "circuit_id" => circuit_id.to_string(), + "aggregation_round" => aggregation_round.to_string() + ); + } + + let lag_by_circuit_type = conn 
+ .fri_prover_jobs_dal() + .min_unproved_l1_batch_number() + .await; + + for ((circuit_id, aggregation_round), l1_batch_number) in lag_by_circuit_type { + metrics::gauge!( + "fri_prover.block_number", l1_batch_number.0 as f64, + "circuit_id" => circuit_id.to_string(), + "aggregation_round" => aggregation_round.to_string()); + } + } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs new file mode 100644 index 000000000000..dc12445045fa --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs @@ -0,0 +1,57 @@ +use async_trait::async_trait; +use zksync_dal::ConnectionPool; + +use crate::house_keeper::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub struct SchedulerCircuitQueuer { + queuing_interval_ms: u64, + pool: ConnectionPool, +} + +impl SchedulerCircuitQueuer { + pub fn new(queuing_interval_ms: u64, pool: ConnectionPool) -> Self { + Self { + queuing_interval_ms, + pool, + } + } + + pub async fn queue_scheduler_circuit_jobs(&mut self) { + let mut conn = self.pool.access_storage().await; + let l1_batch_numbers = conn + .fri_scheduler_dependency_tracker_dal() + .get_l1_batches_ready_for_queuing() + .await; + let len = l1_batch_numbers.len(); + for &l1_batch_number in l1_batch_numbers.iter() { + conn.fri_witness_generator_dal() + .mark_scheduler_jobs_as_queued(l1_batch_number) + .await; + vlog::info!( + "Marked fri scheduler aggregation job for l1_batch {} as queued", + l1_batch_number, + ); + } + conn.fri_scheduler_dependency_tracker_dal() + .mark_l1_batches_queued(l1_batch_numbers) + .await; + metrics::counter!( + "server.scheduler_fri_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } +} + +#[async_trait] +impl PeriodicJob for SchedulerCircuitQueuer { + const SERVICE_NAME: &'static str = "SchedulerCircuitQueuer"; + + async fn run_routine_task(&mut self) { + self.queue_scheduler_circuit_jobs().await; + } + + fn polling_interval_ms(&self) -> u64 { + self.queuing_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs new file mode 100644 index 000000000000..eefd4fa9e770 --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs @@ -0,0 +1,112 @@ +use std::time::Duration; + +use crate::house_keeper::periodic_job::PeriodicJob; +use async_trait::async_trait; +use zksync_dal::ConnectionPool; + +#[derive(Debug)] +pub struct FriWitnessGeneratorJobRetryManager { + pool: ConnectionPool, + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, +} + +impl FriWitnessGeneratorJobRetryManager { + pub fn new( + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, + pool: ConnectionPool, + ) -> Self { + Self { + max_attempts, + processing_timeout, + retry_interval_ms, + pool, + } + } + + pub async fn requeue_stuck_witness_inputs_jobs(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_witness_generator_dal() + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing fri witness input job {:?}", stuck_job); + } + metrics::counter!("server.witness_inputs_fri.requeued_jobs", job_len as u64); + } + + pub 
async fn requeue_stuck_leaf_aggregations_jobs(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_witness_generator_dal() + .requeue_stuck_leaf_aggregations_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing fri leaf aggregation job {:?}", stuck_job); + } + metrics::counter!( + "server.leaf_aggregations_jobs_fri.requeued_jobs", + job_len as u64 + ); + } + + pub async fn requeue_stuck_node_aggregations_jobs(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_witness_generator_dal() + .requeue_stuck_node_aggregations_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing fri node aggregation job {:?}", stuck_job); + } + metrics::counter!( + "server.node_aggregations_jobs_fri.requeued_jobs", + job_len as u64 + ); + } + + pub async fn requeue_stuck_scheduler_jobs(&mut self) { + let stuck_jobs = self + .pool + .access_storage() + .await + .fri_witness_generator_dal() + .requeue_stuck_scheduler_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + vlog::info!("re-queuing fri scheduler job {:?}", stuck_job); + } + metrics::counter!("server.scheduler_jobs_fri.requeued_jobs", job_len as u64); + } +} + +/// Invoked periodically to re-queue stuck fri witness generator jobs. +#[async_trait] +impl PeriodicJob for FriWitnessGeneratorJobRetryManager { + const SERVICE_NAME: &'static str = "FriWitnessGeneratorJobRetryManager"; + + async fn run_routine_task(&mut self) { + self.requeue_stuck_witness_inputs_jobs().await; + self.requeue_stuck_leaf_aggregations_jobs().await; + self.requeue_stuck_node_aggregations_jobs().await; + self.requeue_stuck_scheduler_jobs().await; + } + + fn polling_interval_ms(&self) -> u64 { + self.retry_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs new file mode 100644 index 000000000000..c70d024c8a7e --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -0,0 +1,119 @@ +use std::collections::HashMap; + +use async_trait::async_trait; +use zksync_dal::ConnectionPool; +use zksync_types::proofs::{AggregationRound, JobCountStatistics}; + +use crate::house_keeper::periodic_job::PeriodicJob; + +const FRI_WITNESS_GENERATOR_SERVICE_NAME: &str = "fri_witness_generator"; + +#[derive(Debug)] +pub struct FriWitnessGeneratorStatsReporter { + reporting_interval_ms: u64, + pool: ConnectionPool, +} + +impl FriWitnessGeneratorStatsReporter { + pub fn new(pool: ConnectionPool, reporting_interval_ms: u64) -> Self { + Self { + reporting_interval_ms, + pool, + } + } + + async fn get_job_statistics(&self) -> HashMap<AggregationRound, JobCountStatistics> { + let mut conn = self.pool.access_storage().await; + HashMap::from([ ( + AggregationRound::BasicCircuits, + conn.fri_witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::BasicCircuits) + .await, + ), + ( + AggregationRound::LeafAggregation, + conn.fri_witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::LeafAggregation) + .await, + ), + ( + AggregationRound::NodeAggregation, + conn.fri_witness_generator_dal() + .get_witness_jobs_stats(AggregationRound::NodeAggregation) + .await, + ), + ( + AggregationRound::Scheduler, + conn.fri_witness_generator_dal()
.get_witness_jobs_stats(AggregationRound::Scheduler) + .await, + ), + ]) + } +} + +fn emit_metrics_for_round(round: AggregationRound, stats: JobCountStatistics) { + if stats.queued > 0 || stats.in_progress > 0 { + vlog::trace!( + "Found {} free and {} in progress {:?} FRI witness generator jobs", + stats.queued, + stats.in_progress, + round + ); + } + + metrics::gauge!( + format!("server.{}.jobs", FRI_WITNESS_GENERATOR_SERVICE_NAME), + stats.queued as f64, + "type" => "queued", + "round" => format!("{:?}", round) + ); + + metrics::gauge!( + format!("server.{}.jobs", FRI_WITNESS_GENERATOR_SERVICE_NAME), + stats.in_progress as f64, + "type" => "in_progress", + "round" => format!("{:?}", round) + ); +} + +/// Invoked periodically to push job statistics to Prometheus +/// Note: these values will be used for auto-scaling job processors +#[async_trait] +impl PeriodicJob for FriWitnessGeneratorStatsReporter { + const SERVICE_NAME: &'static str = "WitnessGeneratorStatsReporter"; + + async fn run_routine_task(&mut self) { + let stats_for_all_rounds = self.get_job_statistics().await; + let mut aggregated = JobCountStatistics::default(); + for (round, stats) in stats_for_all_rounds { + emit_metrics_for_round(round, stats); + aggregated = aggregated + stats; + } + + if aggregated.queued > 0 { + vlog::trace!( + "Found {} free and {} in progress witness generator jobs", + aggregated.queued, + aggregated.in_progress + ); + } + + metrics::gauge!( + format!("server.{}.jobs", FRI_WITNESS_GENERATOR_SERVICE_NAME), + aggregated.queued as f64, + "type" => "queued" + ); + + metrics::gauge!( + format!("server.{}.jobs", FRI_WITNESS_GENERATOR_SERVICE_NAME), + aggregated.in_progress as f64, + "type" => "in_progress" + ); + } + + fn polling_interval_ms(&self) -> u64 { + self.reporting_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs index 2a2f6e0a9c7b..9954cc012f80 100644 --- a/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs +++ b/core/bin/zksync_core/src/house_keeper/gcs_blob_cleaner.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory}; @@ -23,6 +24,7 @@ impl AsBlobUrls for (String, String) { pub struct GcsBlobCleaner { object_store: Box<dyn ObjectStore>, cleaning_interval_ms: u64, + pool: ConnectionPool, } const BATCH_CLEANUP_SIZE: u8 = 5; @@ -40,102 +42,121 @@ fn handle_remove_result(result: Result<(), ObjectStoreError>) { } impl GcsBlobCleaner { - pub fn new(store_factory: &ObjectStoreFactory, cleaning_interval_ms: u64) -> Self { + pub async fn new( + store_factory: &ObjectStoreFactory, + pool: ConnectionPool, + cleaning_interval_ms: u64, + ) -> Self { Self { - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, cleaning_interval_ms, + pool, } } - fn cleanup_blobs(&mut self, pool: ConnectionPool) { - self.cleanup_prover_jobs_blobs(pool.clone()); - self.cleanup_witness_inputs_blobs(pool.clone()); - self.cleanup_leaf_aggregation_witness_jobs_blobs(pool.clone()); - self.cleanup_node_aggregation_witness_jobs_blobs(pool.clone()); - self.cleanup_scheduler_witness_jobs_blobs(pool); + async fn cleanup_blobs(&mut self) { + self.cleanup_prover_jobs_blobs().await; + self.cleanup_witness_inputs_blobs().await; + self.cleanup_leaf_aggregation_witness_jobs_blobs().await; + self.cleanup_node_aggregation_witness_jobs_blobs().await; + self.cleanup_scheduler_witness_jobs_blobs().await;
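// A minimal illustrative sketch (not part of the patch) of how another table would
// plug into the `AsBlobUrls` abstraction used by the cleanup helpers: the
// `(String, String)` impl above covers rows that carry two blob URLs, and a
// hypothetical single-URL row could implement the same trait, assuming
// `as_blob_urls` returns `(&str, Option<&str>)` as that impl and its call site
// suggest:
//
//     impl AsBlobUrls for String {
//         fn as_blob_urls(&self) -> (&str, Option<&str>) {
//             (self, None) // no secondary blob to remove
//         }
//     }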
} - fn cleanup_prover_jobs_blobs(&self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn cleanup_prover_jobs_blobs(&self) { + let mut conn = self.pool.access_storage().await; let blob_urls = conn .prover_dal() - .get_circuit_input_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let ids = self.cleanup_blob_urls(Bucket::ProverJobs, blob_urls); - conn.prover_dal().mark_gcs_blobs_as_cleaned(ids); + .get_circuit_input_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE) + .await; + let ids = self.cleanup_blob_urls(Bucket::ProverJobs, blob_urls).await; + conn.prover_dal().mark_gcs_blobs_as_cleaned(ids).await; } - fn cleanup_blob_urls<S: AsBlobUrls>( + async fn cleanup_blob_urls<S: AsBlobUrls>( &self, bucket: Bucket, blob_urls: Vec<(i64, S)>, ) -> Vec<i64> { if !blob_urls.is_empty() { - vlog::info!("Found {} {} for cleaning blobs", blob_urls.len(), bucket); + vlog::info!("Found {} blobs to clean in bucket {bucket}", blob_urls.len()); } for (_, url) in &blob_urls { let (first_url, second_url) = url.as_blob_urls(); - handle_remove_result(self.object_store.remove_raw(bucket, first_url)); + handle_remove_result(self.object_store.remove_raw(bucket, first_url).await); if let Some(second_url) = second_url { - handle_remove_result(self.object_store.remove_raw(bucket, second_url)); + handle_remove_result(self.object_store.remove_raw(bucket, second_url).await); } } blob_urls.into_iter().map(|(id, _)| id).collect() } - fn cleanup_witness_inputs_blobs(&self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn cleanup_witness_inputs_blobs(&self) { + let mut conn = self.pool.access_storage().await; let blob_urls = conn .blocks_dal() - .get_merkle_tree_paths_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let l1_batch_numbers = self.cleanup_blob_urls(Bucket::WitnessInput, blob_urls); + .get_merkle_tree_paths_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE) + .await; + let l1_batch_numbers = self + .cleanup_blob_urls(Bucket::WitnessInput, blob_urls) + .await; conn.blocks_dal() - .mark_gcs_blobs_as_cleaned(l1_batch_numbers); + .mark_gcs_blobs_as_cleaned(&l1_batch_numbers) + .await; } - fn cleanup_leaf_aggregation_witness_jobs_blobs(&mut self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn cleanup_leaf_aggregation_witness_jobs_blobs(&self) { + let mut conn = self.pool.access_storage().await; let blob_urls = conn .witness_generator_dal() - .get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE); - let l1_batch_numbers = - self.cleanup_blob_urls(Bucket::LeafAggregationWitnessJobs, blob_urls); + .get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE) + .await; + let l1_batch_numbers = self + .cleanup_blob_urls(Bucket::LeafAggregationWitnessJobs, blob_urls) + .await; conn.witness_generator_dal() - .mark_leaf_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers); + .mark_leaf_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers) + .await; } - fn cleanup_node_aggregation_witness_jobs_blobs(&mut self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn cleanup_node_aggregation_witness_jobs_blobs(&self) { + let mut conn = self.pool.access_storage().await; let blob_urls = conn .witness_generator_dal() .get_leaf_layer_subqueues_and_aggregation_outputs_blob_urls_to_be_cleaned( BATCH_CLEANUP_SIZE, - ); - let l1_batch_numbers = - self.cleanup_blob_urls(Bucket::NodeAggregationWitnessJobs, blob_urls); + ) + .await; + let l1_batch_numbers = self + .cleanup_blob_urls(Bucket::NodeAggregationWitnessJobs,
blob_urls) + .await; conn.witness_generator_dal() - .mark_node_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers); + .mark_node_aggregation_gcs_blobs_as_cleaned(l1_batch_numbers) + .await; } - fn cleanup_scheduler_witness_jobs_blobs(&mut self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn cleanup_scheduler_witness_jobs_blobs(&self) { + let mut conn = self.pool.access_storage().await; let blob_urls = conn .witness_generator_dal() - .get_scheduler_witness_and_node_aggregations_blob_urls_to_be_cleaned( - BATCH_CLEANUP_SIZE, - ); - let l1_batch_numbers = self.cleanup_blob_urls(Bucket::SchedulerWitnessJobs, blob_urls); + .get_scheduler_witness_and_node_aggregations_blob_urls_to_be_cleaned(BATCH_CLEANUP_SIZE) + .await; + let l1_batch_numbers = self + .cleanup_blob_urls(Bucket::SchedulerWitnessJobs, blob_urls) + .await; conn.witness_generator_dal() - .mark_scheduler_witness_gcs_blobs_as_cleaned(l1_batch_numbers); + .mark_scheduler_witness_gcs_blobs_as_cleaned(l1_batch_numbers) + .await; } } +#[async_trait] impl PeriodicJob for GcsBlobCleaner { const SERVICE_NAME: &'static str = "GcsBlobCleaner"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - self.cleanup_blobs(connection_pool); + async fn run_routine_task(&mut self) { + self.cleanup_blobs().await; } fn polling_interval_ms(&self) -> u64 { diff --git a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index 27c2792ec1f5..194ead19ee62 100644 --- a/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -6,27 +7,37 @@ use crate::house_keeper::periodic_job::PeriodicJob; pub struct GpuProverQueueMonitor { synthesizer_per_gpu: u16, reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, } impl GpuProverQueueMonitor { - pub fn new(synthesizer_per_gpu: u16, reporting_interval_ms: u64) -> Self { + pub fn new( + synthesizer_per_gpu: u16, + reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { synthesizer_per_gpu, reporting_interval_ms, + prover_connection_pool, } } } /// Invoked periodically to push prover job statistics to Prometheus /// Note: these values will be used for auto-scaling circuit-synthesizer +#[async_trait] impl PeriodicJob for GpuProverQueueMonitor { const SERVICE_NAME: &'static str = "GpuProverQueueMonitor"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - let prover_gpu_count_per_region_zone = connection_pool - .access_storage_blocking() + async fn run_routine_task(&mut self) { + let prover_gpu_count_per_region_zone = self + .prover_connection_pool + .access_storage() + .await .gpu_prover_queue_dal() - .get_prover_gpu_count_per_region_zone(); + .get_prover_gpu_count_per_region_zone() + .await; for ((region, zone), num_gpu) in prover_gpu_count_per_region_zone { let synthesizers = self.synthesizer_per_gpu as u64 * num_gpu; diff --git a/core/bin/zksync_core/src/house_keeper/mod.rs b/core/bin/zksync_core/src/house_keeper/mod.rs index d467b1287d4b..11cf7d1e272a 100644 --- a/core/bin/zksync_core/src/house_keeper/mod.rs +++ b/core/bin/zksync_core/src/house_keeper/mod.rs @@ -1,8 +1,14 @@ pub mod blocks_state_reporter; +pub mod fri_prover_job_retry_manager; +pub mod fri_prover_queue_monitor; +pub mod 
fri_scheduler_circuit_queuer; +pub mod fri_witness_generator_jobs_retry_manager; +pub mod fri_witness_generator_queue_monitor; pub mod gcs_blob_cleaner; pub mod gpu_prover_queue_monitor; pub mod periodic_job; pub mod prover_job_retry_manager; pub mod prover_queue_monitor; +pub mod waiting_to_queued_fri_witness_job_mover; pub mod waiting_to_queued_witness_job_mover; pub mod witness_generator_queue_monitor; diff --git a/core/bin/zksync_core/src/house_keeper/periodic_job.rs b/core/bin/zksync_core/src/house_keeper/periodic_job.rs index 190e6e2b3fd8..2558dd450b41 100644 --- a/core/bin/zksync_core/src/house_keeper/periodic_job.rs +++ b/core/bin/zksync_core/src/house_keeper/periodic_job.rs @@ -3,16 +3,14 @@ use std::time::Duration; use async_trait::async_trait; use tokio::time::sleep; -use zksync_dal::ConnectionPool; - #[async_trait] pub trait PeriodicJob: Sync + Send { const SERVICE_NAME: &'static str; /// Runs the routine task periodically in [`Self::polling_interval_ms()`] frequency. - fn run_routine_task(&mut self, connection_pool: ConnectionPool); + async fn run_routine_task(&mut self); - async fn run(mut self, connection_pool: ConnectionPool) + async fn run(mut self) where Self: Sized, { @@ -22,7 +20,7 @@ pub trait PeriodicJob: Sync + Send { self.polling_interval_ms() ); loop { - self.run_routine_task(connection_pool.clone()); + self.run_routine_task().await; sleep(Duration::from_millis(self.polling_interval_ms())).await; } } diff --git a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs index b362e888a0d0..ab8d3cd8c3ea 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_job_retry_manager.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use async_trait::async_trait; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -9,27 +10,38 @@ pub struct ProverJobRetryManager { max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, + prover_connection_pool: ConnectionPool, } impl ProverJobRetryManager { - pub fn new(max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64) -> Self { + pub fn new( + max_attempts: u32, + processing_timeout: Duration, + retry_interval_ms: u64, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { max_attempts, processing_timeout, retry_interval_ms, + prover_connection_pool, } } } /// Invoked periodically to re-queue stuck prover jobs. 
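// A minimal illustrative sketch (not part of the patch) of an implementor under the
// reworked trait: each job now owns its `ConnectionPool` and `run_routine_task` is
// async, so the default `run()` loop can await each tick directly instead of having
// a pool threaded through every call. `NoopJob` and its 1s interval are invented
// here for illustration only.
//
//     use async_trait::async_trait;
//
//     #[derive(Debug)]
//     struct NoopJob;
//
//     #[async_trait]
//     impl PeriodicJob for NoopJob {
//         const SERVICE_NAME: &'static str = "NoopJob";
//
//         async fn run_routine_task(&mut self) {
//             vlog::info!("tick");
//         }
//
//         fn polling_interval_ms(&self) -> u64 {
//             1_000
//         }
//     }
//
//     // Spawned the same way as the house-keeper jobs: tokio::spawn(NoopJob.run());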
+#[async_trait] impl PeriodicJob for ProverJobRetryManager { const SERVICE_NAME: &'static str = "ProverJobRetryManager"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - let stuck_jobs = connection_pool - .access_storage_blocking() + async fn run_routine_task(&mut self) { + let stuck_jobs = self + .prover_connection_pool + .access_storage() + .await .prover_dal() - .requeue_stuck_jobs(self.processing_timeout, self.max_attempts); + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) + .await; let job_len = stuck_jobs.len(); for stuck_job in stuck_jobs { vlog::info!("re-queuing prover job {:?}", stuck_job); diff --git a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs index 3ad081646264..801c6b51478f 100644 --- a/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/prover_queue_monitor.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use zksync_config::configs::ProverGroupConfig; use zksync_dal::ConnectionPool; use zksync_prover_utils::circuit_name_to_numeric_index; @@ -7,25 +8,28 @@ use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct ProverStatsReporter { reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, } impl ProverStatsReporter { - pub fn new(reporting_interval_ms: u64) -> Self { + pub fn new(reporting_interval_ms: u64, prover_connection_pool: ConnectionPool) -> Self { Self { reporting_interval_ms, + prover_connection_pool, } } } /// Invoked periodically to push job statistics to Prometheus /// Note: these values will be used for manually scaling provers. +#[async_trait] impl PeriodicJob for ProverStatsReporter { const SERVICE_NAME: &'static str = "ProverStatsReporter"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { + async fn run_routine_task(&mut self) { let prover_group_config = ProverGroupConfig::from_env(); - let mut conn = connection_pool.access_storage_blocking(); - let stats = conn.prover_dal().get_prover_jobs_stats_per_circuit(); + let mut conn = self.prover_connection_pool.access_storage().await; + let stats = conn.prover_dal().get_prover_jobs_stats_per_circuit().await; for (circuit_name, stats) in stats.into_iter() { let group_id = prover_group_config @@ -51,14 +55,16 @@ impl PeriodicJob for ProverStatsReporter { ); } - if let Some(min_unproved_l1_batch_number) = conn.prover_dal().min_unproved_l1_batch_number() + if let Some(min_unproved_l1_batch_number) = + conn.prover_dal().min_unproved_l1_batch_number().await { metrics::gauge!("server.block_number", min_unproved_l1_batch_number.0 as f64, "stage" => "circuit_aggregation") } let lag_by_circuit_type = conn .prover_dal() - .min_unproved_l1_batch_number_by_basic_circuit_type(); + .min_unproved_l1_batch_number_by_basic_circuit_type() + .await; for (circuit_type, l1_batch_number) in lag_by_circuit_type { metrics::gauge!("server.block_number", l1_batch_number.0 as f64, "stage" => format!("circuit_{}", circuit_type)); diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs new file mode 100644 index 000000000000..e099caabf5df --- /dev/null +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -0,0 +1,88 @@ +use async_trait::async_trait; +use zksync_dal::ConnectionPool; + +use crate::house_keeper::periodic_job::PeriodicJob; + +#[derive(Debug)] +pub 
struct WaitingToQueuedFriWitnessJobMover { + job_moving_interval_ms: u64, + pool: ConnectionPool, +} + +impl WaitingToQueuedFriWitnessJobMover { + pub fn new(job_mover_interval_ms: u64, pool: ConnectionPool) -> Self { + Self { + job_moving_interval_ms: job_mover_interval_ms, + pool, + } + } + + async fn move_leaf_aggregation_jobs(&mut self) { + let mut conn = self.pool.access_storage().await; + let l1_batch_numbers = conn + .fri_witness_generator_dal() + .move_leaf_aggregation_jobs_from_waiting_to_queued() + .await; + let len = l1_batch_numbers.len(); + for (l1_batch_number, circuit_id) in l1_batch_numbers { + vlog::info!( + "Marked fri leaf aggregation job for l1_batch {} and circuit_id {} as queued", + l1_batch_number, + circuit_id + ); + } + metrics::counter!( + "server.leaf_fri_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } + + pub async fn move_node_aggregation_jobs_from_waiting_to_queued( + &mut self, + ) -> Vec<(i64, u8, u16)> { + let mut conn = self.pool.access_storage().await; + let mut jobs = conn + .fri_witness_generator_dal() + .move_depth_zero_node_aggregation_jobs() + .await; + jobs.extend( + conn.fri_witness_generator_dal() + .move_depth_non_zero_node_aggregation_jobs() + .await, + ); + jobs + } + + async fn move_node_aggregation_jobs(&mut self) { + let l1_batch_numbers = self + .move_node_aggregation_jobs_from_waiting_to_queued() + .await; + let len = l1_batch_numbers.len(); + for (l1_batch_number, circuit_id, depth) in l1_batch_numbers { + vlog::info!( + "Marked fri node aggregation job for l1_batch {} and circuit_id {} depth {} as queued", + l1_batch_number, + circuit_id, + depth + ); + } + metrics::counter!( + "server.node_fri_witness_generator.waiting_to_queued_jobs_transitions", + len as u64 + ); + } +} + +#[async_trait] +impl PeriodicJob for WaitingToQueuedFriWitnessJobMover { + const SERVICE_NAME: &'static str = "WaitingToQueuedFriWitnessJobMover"; + + async fn run_routine_task(&mut self) { + self.move_leaf_aggregation_jobs().await; + self.move_node_aggregation_jobs().await; + } + + fn polling_interval_ms(&self) -> u64 { + self.job_moving_interval_ms + } +} diff --git a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs index db46f756490e..1fb219e2b2f2 100644 --- a/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs +++ b/core/bin/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -5,26 +6,29 @@ use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct WaitingToQueuedWitnessJobMover { job_moving_interval_ms: u64, + prover_connection_pool: ConnectionPool, } impl WaitingToQueuedWitnessJobMover { - pub fn new(job_mover_interval_ms: u64) -> Self { + pub fn new(job_mover_interval_ms: u64, prover_connection_pool: ConnectionPool) -> Self { Self { job_moving_interval_ms: job_mover_interval_ms, + prover_connection_pool, } } - fn move_jobs(&mut self, pool: ConnectionPool) { - self.move_leaf_aggregation_jobs(pool.clone()); - self.move_node_aggregation_jobs(pool.clone()); - self.move_scheduler_jobs(pool); + async fn move_jobs(&mut self) { + self.move_leaf_aggregation_jobs().await; + self.move_node_aggregation_jobs().await; + self.move_scheduler_jobs().await; } - fn move_leaf_aggregation_jobs(&mut self, pool: ConnectionPool) { - let mut conn =
pool.access_storage_blocking(); + async fn move_leaf_aggregation_jobs(&mut self) { + let mut conn = self.prover_connection_pool.access_storage().await; let l1_batch_numbers = conn .witness_generator_dal() - .move_leaf_aggregation_jobs_from_waiting_to_queued(); + .move_leaf_aggregation_jobs_from_waiting_to_queued() + .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { vlog::info!( @@ -38,11 +42,12 @@ impl WaitingToQueuedWitnessJobMover { ); } - fn move_node_aggregation_jobs(&mut self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn move_node_aggregation_jobs(&mut self) { + let mut conn = self.prover_connection_pool.access_storage().await; let l1_batch_numbers = conn .witness_generator_dal() - .move_node_aggregation_jobs_from_waiting_to_queued(); + .move_node_aggregation_jobs_from_waiting_to_queued() + .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { vlog::info!( @@ -56,11 +61,12 @@ impl WaitingToQueuedWitnessJobMover { ); } - fn move_scheduler_jobs(&mut self, pool: ConnectionPool) { - let mut conn = pool.access_storage_blocking(); + async fn move_scheduler_jobs(&mut self) { + let mut conn = self.prover_connection_pool.access_storage().await; let l1_batch_numbers = conn .witness_generator_dal() - .move_scheduler_jobs_from_waiting_to_queued(); + .move_scheduler_jobs_from_waiting_to_queued() + .await; let len = l1_batch_numbers.len(); for l1_batch_number in l1_batch_numbers { vlog::info!( @@ -75,11 +81,12 @@ impl WaitingToQueuedWitnessJobMover { } } +#[async_trait] impl PeriodicJob for WaitingToQueuedWitnessJobMover { const SERVICE_NAME: &'static str = "WaitingToQueuedWitnessJobMover"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - self.move_jobs(connection_pool); + async fn run_routine_task(&mut self) { + self.move_jobs().await; } fn polling_interval_ms(&self) -> u64 { diff --git a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index dddc14d1b1c4..37f2b55f6cab 100644 --- a/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/bin/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use async_trait::async_trait; use zksync_dal::ConnectionPool; use zksync_types::proofs::{AggregationRound, JobCountStatistics}; @@ -10,39 +11,45 @@ const WITNESS_GENERATOR_SERVICE_NAME: &str = "witness_generator"; #[derive(Debug)] pub struct WitnessGeneratorStatsReporter { reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, } impl WitnessGeneratorStatsReporter { - pub fn new(reporting_interval_ms: u64) -> Self { + pub fn new(reporting_interval_ms: u64, prover_connection_pool: ConnectionPool) -> Self { Self { reporting_interval_ms, + prover_connection_pool, } } - fn get_job_statistics( - connection_pool: ConnectionPool, + async fn get_job_statistics( + prover_connection_pool: &ConnectionPool, ) -> HashMap<AggregationRound, JobCountStatistics> { - let mut conn = connection_pool.access_storage_blocking(); + let mut conn = prover_connection_pool.access_storage().await; HashMap::from([ ( AggregationRound::BasicCircuits, conn.witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::BasicCircuits), + .get_witness_jobs_stats(AggregationRound::BasicCircuits) + .await, ), ( AggregationRound::LeafAggregation, conn.witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::LeafAggregation), +
.get_witness_jobs_stats(AggregationRound::LeafAggregation) + .await, ), ( AggregationRound::NodeAggregation, conn.witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::NodeAggregation), + .get_witness_jobs_stats(AggregationRound::NodeAggregation) + .await, ), ( AggregationRound::Scheduler, conn.witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::Scheduler), + .get_witness_jobs_stats(AggregationRound::Scheduler) + .await, ), ]) } @@ -75,11 +82,12 @@ fn emit_metrics_for_round(round: AggregationRound, stats: JobCountStatistics) { /// Invoked periodically to push job statistics to Prometheus /// Note: these values will be used for auto-scaling job processors +#[async_trait] impl PeriodicJob for WitnessGeneratorStatsReporter { const SERVICE_NAME: &'static str = "WitnessGeneratorStatsReporter"; - fn run_routine_task(&mut self, connection_pool: ConnectionPool) { - let stats_for_all_rounds = Self::get_job_statistics(connection_pool); + async fn run_routine_task(&mut self) { + let stats_for_all_rounds = Self::get_job_statistics(&self.prover_connection_pool).await; let mut aggregated = JobCountStatistics::default(); for (round, stats) in stats_for_all_rounds { emit_metrics_for_round(round, stats); diff --git a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs index 2c2164f8fb13..a0c6dac365c7 100644 --- a/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/bin/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs @@ -42,6 +42,7 @@ async fn kept_updated() { internal_l1_pricing_multiplier: 0.8, internal_enforced_l1_gas_price: None, poll_period: 5, + max_l1_gas_price: None, }, ) .await diff --git a/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs index b9b584172f2b..cdd40f5fa028 100644 --- a/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs +++ b/core/bin/zksync_core/src/l1_gas_price/main_node_fetcher.rs @@ -54,6 +54,8 @@ impl MainNodeGasPriceFetcher { Ok(price) => price, Err(err) => { vlog::warn!("Unable to get the gas price: {}", err); + // A delay to avoid spamming the main node with requests. + tokio::time::sleep(SLEEP_INTERVAL).await; continue; } }; diff --git a/core/bin/zksync_core/src/l1_gas_price/mod.rs b/core/bin/zksync_core/src/l1_gas_price/mod.rs index 10cdf71940de..45e228d79c54 100644 --- a/core/bin/zksync_core/src/l1_gas_price/mod.rs +++ b/core/bin/zksync_core/src/l1_gas_price/mod.rs @@ -3,9 +3,11 @@ pub use gas_adjuster::bounded_gas_adjuster::BoundedGasAdjuster; pub use gas_adjuster::GasAdjuster; pub use main_node_fetcher::MainNodeGasPriceFetcher; +pub use singleton::GasAdjusterSingleton; mod gas_adjuster; mod main_node_fetcher; +pub mod singleton; /// Abstraction that provides information about the L1 gas price currently /// observed by the application. diff --git a/core/bin/zksync_core/src/l1_gas_price/singleton.rs b/core/bin/zksync_core/src/l1_gas_price/singleton.rs new file mode 100644 index 000000000000..83faa7c96ffa --- /dev/null +++ b/core/bin/zksync_core/src/l1_gas_price/singleton.rs @@ -0,0 +1,47 @@ +use crate::l1_gas_price::{BoundedGasAdjuster, GasAdjuster}; +use std::sync::Arc; +use tokio::sync::{watch, OnceCell}; +use tokio::task::JoinHandle; +use zksync_config::{ETHClientConfig, GasAdjusterConfig}; +use zksync_eth_client::clients::http::QueryClient; + +/// Special struct for creating a singleton of `GasAdjuster`. +/// This is needed only for running the server. 
This struct uses all configs from env. +#[derive(Debug, Default)] +pub struct GasAdjusterSingleton(OnceCell<Arc<GasAdjuster<QueryClient>>>); + +impl GasAdjusterSingleton { + pub fn new() -> Self { + Default::default() + } + + pub async fn get_or_init(&mut self) -> Arc<GasAdjuster<QueryClient>> { + let adjuster = self + .0 + .get_or_init(|| async { + let eth_client_config = ETHClientConfig::from_env(); + let query_client = QueryClient::new(&eth_client_config.web3_url).unwrap(); + let gas_adjuster_config = GasAdjusterConfig::from_env(); + let adjuster = GasAdjuster::new(query_client.clone(), gas_adjuster_config) + .await + .unwrap(); + Arc::new(adjuster) + }) + .await; + adjuster.clone() + } + + pub async fn get_or_init_bounded( + &mut self, + ) -> Arc<BoundedGasAdjuster<GasAdjuster<QueryClient>>> { + let config = GasAdjusterConfig::from_env(); + let adjuster = self.get_or_init().await; + Arc::new(BoundedGasAdjuster::new(config.max_l1_gas_price(), adjuster)) + } + + pub fn run_if_initialized(self, stop_signal: watch::Receiver<bool>) -> Option<JoinHandle<()>> { + self.0 + .get() + .map(|adjuster| tokio::spawn(adjuster.clone().run(stop_signal))) + } +} diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index 6d0164eb5884..a676d3de2e20 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -1,15 +1,10 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::future::Future; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; -use std::time::Instant; +use std::{str::FromStr, sync::Arc, time::Instant}; +use api_server::execution_sandbox::VmConcurrencyLimiter; use futures::channel::oneshot; -use futures::future; -use tokio::runtime::Builder; -use tokio::sync::watch; -use tokio::task::JoinHandle; +use tokio::{sync::watch, task::JoinHandle}; use house_keeper::periodic_job::PeriodicJob; use prometheus_exporter::run_prometheus_exporter; @@ -17,48 +12,68 @@ use zksync_circuit_breaker::{ facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, vks::VksChecker, CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, }; - -use zksync_config::configs::house_keeper::HouseKeeperConfig; -use zksync_config::configs::{ProverGroupConfig, WitnessGeneratorConfig}; -use zksync_config::{ProverConfigs, ZkSyncConfig}; -use zksync_dal::healthcheck::ConnectionPoolHealthCheck; -use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_eth_client::clients::http::PKSigningClient; -use zksync_eth_client::BoundEthInterface; +use zksync_config::configs::{ + api::{HealthCheckConfig, Web3JsonRpcConfig}, + chain::{ + self, CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, + StateKeeperConfig, + }, + house_keeper::HouseKeeperConfig, + FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProverGroupConfig, + WitnessGeneratorConfig, +}; +use zksync_config::{ + ApiConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, FetcherConfig, + ProverConfigs, +}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::{ + connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool, StorageProcessor, +}; +use zksync_eth_client::clients::http::QueryClient; +use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; use zksync_health_check::CheckHealth; -use zksync_mempool::MempoolStore; use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; -use zksync_types::proofs::AggregationRound; -use zksync_types::L2ChainId; - -use crate::api_server::healthcheck; -use crate::api_server::tx_sender::{TxSender,
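// A sketch (not part of the patch) of how the singleton above is meant to be driven;
// `stop_receiver` and `task_futures` are the values created later during component
// initialization:
//
//     let mut gas_adjuster = GasAdjusterSingleton::new();
//     // The first caller lazily builds the shared `GasAdjuster` from env configs.
//     let bounded = gas_adjuster.get_or_init_bounded().await;
//     // After wiring all components, spawn the price-refresh loop only if some
//     // component actually initialized the adjuster.
//     if let Some(handle) = gas_adjuster.run_if_initialized(stop_receiver) {
//         task_futures.push(handle);
//     }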
TxSenderBuilder}; +use zksync_state::FactoryDepsCache; +use zksync_types::{proofs::AggregationRound, L2ChainId, PackedEthSignature, H160}; + +use crate::api_server::healthcheck::HealthCheckHandle; +use crate::api_server::tx_sender::TxSenderConfig; +use crate::api_server::web3::api_health_check::ApiHealthCheck; +use crate::api_server::web3::state::InternalApiConfig; +use crate::api_server::{ + healthcheck, + tx_sender::{TxSender, TxSenderBuilder}, +}; use crate::eth_sender::{Aggregator, EthTxManager}; -use crate::fee_monitor::FeeMonitor; -use crate::house_keeper::blocks_state_reporter::L1BatchMetricsReporter; +use crate::house_keeper::fri_prover_job_retry_manager::FriProverJobRetryManager; +use crate::house_keeper::fri_prover_queue_monitor::FriProverStatsReporter; +use crate::house_keeper::fri_scheduler_circuit_queuer::SchedulerCircuitQueuer; +use crate::house_keeper::fri_witness_generator_jobs_retry_manager::FriWitnessGeneratorJobRetryManager; +use crate::house_keeper::fri_witness_generator_queue_monitor::FriWitnessGeneratorStatsReporter; use crate::house_keeper::gcs_blob_cleaner::GcsBlobCleaner; -use crate::house_keeper::gpu_prover_queue_monitor::GpuProverQueueMonitor; use crate::house_keeper::{ + blocks_state_reporter::L1BatchMetricsReporter, gpu_prover_queue_monitor::GpuProverQueueMonitor, prover_job_retry_manager::ProverJobRetryManager, prover_queue_monitor::ProverStatsReporter, + waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover, waiting_to_queued_witness_job_mover::WaitingToQueuedWitnessJobMover, witness_generator_queue_monitor::WitnessGeneratorStatsReporter, }; -use crate::l1_gas_price::BoundedGasAdjuster; -use crate::l1_gas_price::L1GasPriceProvider; -use crate::metadata_calculator::{MetadataCalculator, TreeHealthCheck, TreeImplementation}; -use crate::state_keeper::mempool_actor::MempoolFetcher; -use crate::state_keeper::MempoolGuard; -use crate::witness_generator::basic_circuits::BasicWitnessGenerator; -use crate::witness_generator::leaf_aggregation::LeafAggregationWitnessGenerator; -use crate::witness_generator::node_aggregation::NodeAggregationWitnessGenerator; -use crate::witness_generator::scheduler::SchedulerWitnessGenerator; +use crate::l1_gas_price::{GasAdjusterSingleton, L1GasPriceProvider}; +use crate::metadata_calculator::{ + MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, TreeHealthCheck, +}; +use crate::state_keeper::{create_state_keeper, MempoolFetcher, MempoolGuard, MiniblockSealer}; +use crate::witness_generator::{ + basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, + node_aggregation::NodeAggregationWitnessGenerator, scheduler::SchedulerWitnessGenerator, +}; use crate::{ api_server::{explorer, web3}, data_fetchers::run_data_fetchers, eth_sender::EthTxAggregator, eth_watch::start_eth_watch, - l1_gas_price::GasAdjuster, }; pub mod api_server; @@ -67,7 +82,6 @@ pub mod consistency_checker; pub mod data_fetchers; pub mod eth_sender; pub mod eth_watch; -pub mod fee_monitor; pub mod fee_ticker; pub mod gas_tracker; pub mod genesis; @@ -79,44 +93,33 @@ pub mod state_keeper; pub mod sync_layer; pub mod witness_generator; -/// Waits for *any* of the tokio tasks to be finished. -/// Since these tasks are used as actors which should live as long -/// as the application runs, any possible outcome (either `Ok` or `Err`) is considered -/// as a reason to stop the server completely.
-pub async fn wait_for_tasks(task_futures: Vec<JoinHandle<()>>, tasks_allowed_to_finish: bool) { - match future::select_all(task_futures).await.0 { - Ok(_) => { - if tasks_allowed_to_finish { - vlog::info!("One of the actors finished its run. Finishing execution."); - } else { - vlog::error!( - "One of the actors finished its run, while it wasn't expected to do it" - ); - } - } - Err(error) => { - vlog::error!( - "One of the tokio actors unexpectedly finished, shutting down: {:?}", - error - ); - } - } -} - /// Inserts the initial information about zkSync tokens into the database. -pub async fn genesis_init(config: ZkSyncConfig) { - let mut storage = StorageProcessor::establish_connection_blocking(true); +pub async fn genesis_init(eth_sender: &ETHSenderConfig, network_config: &NetworkConfig) { + let mut storage = StorageProcessor::establish_connection(true).await; + let operator_address = PackedEthSignature::address_from_private_key( + &eth_sender + .sender + .private_key() + .expect("Private key is required for genesis init"), + ) + .expect("Failed to restore operator address from private key"); + genesis::ensure_genesis_state( &mut storage, - L2ChainId(config.chain.eth.zksync_network_id), - genesis::GenesisParams::MainNode { + L2ChainId(network_config.zksync_network_id), + &genesis::GenesisParams::MainNode { // We consider the operator to be the first validator for now. - first_validator: config.eth_sender.sender.operator_commit_eth_addr, + first_validator: operator_address, }, ) .await; } +pub async fn is_genesis_needed() -> bool { + let mut storage = StorageProcessor::establish_connection(true).await; + storage.blocks_dal().is_genesis_needed().await +} + /// Sets up an interrupt handler and returns a future that resolves once an interrupt signal /// is received. pub fn setup_sigint_handler() -> oneshot::Receiver<()> { @@ -145,9 +148,7 @@ pub enum Component { ExplorerApi, // Metadata Calculator.
Tree, - TreeNew, TreeLightweight, - TreeLightweightNew, TreeBackup, EthWatcher, // Eth tx generator @@ -181,10 +182,10 @@ impl FromStr for Components { "http_api" => Ok(Components(vec![Component::HttpApi])), "ws_api" => Ok(Components(vec![Component::WsApi])), "explorer_api" => Ok(Components(vec![Component::ExplorerApi])), - "tree" => Ok(Components(vec![Component::Tree])), - "tree_new" => Ok(Components(vec![Component::TreeNew])), - "tree_lightweight" => Ok(Components(vec![Component::TreeLightweight])), - "tree_lightweight_new" => Ok(Components(vec![Component::TreeLightweightNew])), + "tree" | "tree_new" => Ok(Components(vec![Component::Tree])), + "tree_lightweight" | "tree_lightweight_new" => { + Ok(Components(vec![Component::TreeLightweight])) + } "tree_backup" => Ok(Components(vec![Component::TreeBackup])), "data_fetcher" => Ok(Components(vec![Component::DataFetcher])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), @@ -235,138 +236,152 @@ impl FromStr for Components { } pub async fn initialize_components( - config: &ZkSyncConfig, components: Vec<Component>, use_prometheus_pushgateway: bool, ) -> anyhow::Result<( Vec<JoinHandle<()>>, watch::Sender<bool>, oneshot::Receiver<CircuitBreakerError>, + HealthCheckHandle, )> { - vlog::info!("Starting the components: {:?}", components); - let connection_pool = ConnectionPool::new(None, true); - let replica_connection_pool = ConnectionPool::new(None, false); + vlog::info!("Starting the components: {components:?}"); + let connection_pool = ConnectionPool::new(None, DbVariant::Master).await; + let prover_connection_pool = ConnectionPool::new(None, DbVariant::Prover).await; + let replica_connection_pool = ConnectionPool::new(None, DbVariant::Replica).await; let mut healthchecks: Vec<Box<dyn CheckHealth>> = Vec::new(); + let contracts_config = ContractsConfig::from_env(); + let eth_client_config = ETHClientConfig::from_env(); + let circuit_breaker_config = CircuitBreakerConfig::from_env(); let circuit_breaker_checker = CircuitBreakerChecker::new( - circuit_breakers_for_components(&components, config), - &config.chain.circuit_breaker, + circuit_breakers_for_components( + &components, + &eth_client_config.web3_url, + &circuit_breaker_config, + contracts_config.diamond_proxy_addr, + ) + .await, + &circuit_breaker_config, ); circuit_breaker_checker.check().await.unwrap_or_else(|err| { panic!("Circuit breaker triggered: {}", err); }); + let query_client = QueryClient::new(&eth_client_config.web3_url).unwrap(); + let mut gas_adjuster = GasAdjusterSingleton::new(); + let (stop_sender, stop_receiver) = watch::channel(false); let (cb_sender, cb_receiver) = oneshot::channel(); // Prometheus exporter and circuit breaker checker should run for every component configuration.
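// (How the exporter call just below switches modes, a sketch rather than a statement
// about the library: `run_prometheus_exporter` here takes the listener port plus an
// optional `(pushgateway_url, push_interval)` pair, so passing `None` keeps the plain
// pull-mode /metrics listener:
//
//     run_prometheus_exporter(prom_config.listener_port, None);
// )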
+    let prom_config = PrometheusConfig::from_env();
     let mut task_futures: Vec<JoinHandle<()>> = vec![
-        run_prometheus_exporter(config.api.prometheus.clone(), use_prometheus_pushgateway),
+        run_prometheus_exporter(
+            prom_config.listener_port,
+            use_prometheus_pushgateway.then(|| {
+                (
+                    prom_config.pushgateway_url.clone(),
+                    prom_config.push_interval(),
+                )
+            }),
+        ),
         tokio::spawn(circuit_breaker_checker.run(cb_sender, stop_receiver.clone())),
     ];
 
-    if components.contains(&Component::HttpApi) {
-        let started_at = Instant::now();
-        vlog::info!("initializing HTTP API");
-        task_futures.extend(
-            run_http_api(
-                config,
+    let factory_deps_cache = FactoryDepsCache::new(
+        "factory_deps_cache",
+        Web3JsonRpcConfig::from_env().factory_deps_cache_size_mb(),
+    );
+
+    if components.contains(&Component::WsApi)
+        || components.contains(&Component::HttpApi)
+        || components.contains(&Component::ExplorerApi)
+    {
+        let api_config = ApiConfig::from_env();
+        let state_keeper_config = StateKeeperConfig::from_env();
+        let network_config = NetworkConfig::from_env();
+        let tx_sender_config = TxSenderConfig::new(&state_keeper_config, &api_config.web3_json_rpc);
+        let internal_api_config = InternalApiConfig::new(
+            &network_config,
+            &api_config.web3_json_rpc,
+            &contracts_config,
+        );
+        if components.contains(&Component::HttpApi) {
+            let started_at = Instant::now();
+            vlog::info!("initializing HTTP API");
+            let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await;
+            let (futures, health_check) = run_http_api(
+                &tx_sender_config,
+                &state_keeper_config,
+                &internal_api_config,
+                &api_config,
                 connection_pool.clone(),
                 replica_connection_pool.clone(),
                 stop_receiver.clone(),
+                bounded_gas_adjuster.clone(),
+                state_keeper_config.save_call_traces,
+                factory_deps_cache.clone(),
             )
-            .await,
-        );
-        vlog::info!("initialized HTTP API in {:?}", started_at.elapsed());
-        metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "http_api");
-    }
+            .await;
+            task_futures.extend(futures);
+            healthchecks.push(Box::new(health_check));
+            vlog::info!("initialized HTTP API in {:?}", started_at.elapsed());
+            metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "http_api");
+        }
 
-    if components.contains(&Component::WsApi) {
-        let started_at = Instant::now();
-        vlog::info!("initializing WS API");
-        task_futures.extend(
-            run_ws_api(
-                config,
+        if components.contains(&Component::WsApi) {
+            let started_at = Instant::now();
+            vlog::info!("initializing WS API");
+            let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await;
+            let (futures, health_check) = run_ws_api(
+                &tx_sender_config,
+                &state_keeper_config,
+                &internal_api_config,
+                &api_config,
+                bounded_gas_adjuster.clone(),
                 connection_pool.clone(),
                 replica_connection_pool.clone(),
                 stop_receiver.clone(),
+                factory_deps_cache.clone(),
             )
-            .await,
-        );
-        vlog::info!("initialized WS API in {:?}", started_at.elapsed());
-        metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "ws_api");
-    }
+            .await;
+            task_futures.extend(futures);
+            healthchecks.push(Box::new(health_check));
+            vlog::info!("initialized WS API in {:?}", started_at.elapsed());
+            metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "ws_api");
+        }
 
-    if components.contains(&Component::ExplorerApi) {
-        let started_at = Instant::now();
-        vlog::info!("initializing explorer REST API");
-        task_futures.push(explorer::start_server_thread_detached(
-            config.api.explorer.clone(),
-            config.contracts.l2_erc20_bridge_addr,
-            config.chain.state_keeper.fee_account_addr,
-            connection_pool.clone(),
-            replica_connection_pool.clone(),
-            stop_receiver.clone(),
-        ));
-        vlog::info!(
-            "initialized explorer REST API in {:?}",
-            started_at.elapsed()
-        );
-        metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "explorer_api");
+        if components.contains(&Component::ExplorerApi) {
+            let started_at = Instant::now();
+            vlog::info!("initializing explorer REST API");
+            task_futures.push(explorer::start_server_thread_detached(
+                api_config.explorer.clone(),
+                contracts_config.l2_erc20_bridge_addr,
+                state_keeper_config.fee_account_addr,
+                connection_pool.clone(),
+                replica_connection_pool.clone(),
+                stop_receiver.clone(),
+            ));
+            vlog::info!(
+                "initialized explorer REST API in {:?}",
+                started_at.elapsed()
+            );
+            metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "explorer_api");
+        }
     }
 
     if components.contains(&Component::StateKeeper) {
         let started_at = Instant::now();
         vlog::info!("initializing State Keeper");
-        let state_keeper_pool = ConnectionPool::new(Some(1), true);
-        let next_priority_id = state_keeper_pool
-            .access_storage_blocking()
-            .transactions_dal()
-            .next_priority_id();
-        let mempool = MempoolGuard(Arc::new(Mutex::new(MempoolStore::new(
-            next_priority_id,
-            config.chain.mempool.capacity,
-        ))));
-        let eth_gateway = PKSigningClient::from_config(config);
-        let gas_adjuster = Arc::new(
-            GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster)
-                .await
-                .unwrap(),
-        );
-
-        let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new(
-            config.chain.state_keeper.max_l1_gas_price(),
-            gas_adjuster.clone(),
-        ));
-        task_futures.push(tokio::task::spawn(gas_adjuster.run(stop_receiver.clone())));
-
-        let state_keeper_actor = state_keeper::start_state_keeper(
-            config,
-            &state_keeper_pool,
-            mempool.clone(),
-            bounded_gas_adjuster.clone(),
+        let bounded_gas_adjuster = gas_adjuster.get_or_init_bounded().await;
+        add_state_keeper_to_task_futures(
+            &mut task_futures,
+            &contracts_config,
+            StateKeeperConfig::from_env(),
+            &DBConfig::from_env(),
+            &MempoolConfig::from_env(),
+            bounded_gas_adjuster,
             stop_receiver.clone(),
-        );
-
-        task_futures.push(tokio::task::spawn_blocking(move || {
-            state_keeper_actor.run()
-        }));
-
-        let mempool_fetcher_pool = ConnectionPool::new(Some(1), true);
-        let mempool_fetcher_actor = MempoolFetcher::new(mempool, bounded_gas_adjuster, config);
-        task_futures.push(tokio::spawn(mempool_fetcher_actor.run(
-            mempool_fetcher_pool,
-            config.chain.mempool.remove_stuck_txs,
-            config.chain.mempool.stuck_tx_timeout(),
-            config.chain.state_keeper.fair_l2_gas_price,
-            stop_receiver.clone(),
-        )));
-
-        // Fee monitor is normally tied to a single instance of server, and it makes most sense to keep it together
-        // with state keeper (since without state keeper running there should be no balance changes).
-        let fee_monitor_eth_gateway = PKSigningClient::from_config(config);
-        let fee_monitor_pool = ConnectionPool::new(Some(1), true);
-        let fee_monitor_actor = FeeMonitor::new(config, fee_monitor_pool, fee_monitor_eth_gateway);
-        task_futures.push(tokio::spawn(fee_monitor_actor.run()));
-
+        )
+        .await;
         vlog::info!("initialized State Keeper in {:?}", started_at.elapsed());
         metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "state_keeper");
     }
@@ -374,13 +389,12 @@ pub async fn initialize_components(
     if components.contains(&Component::EthWatcher) {
         let started_at = Instant::now();
         vlog::info!("initializing ETH-Watcher");
-        let eth_gateway = PKSigningClient::from_config(config);
-        let eth_watch_pool = ConnectionPool::new(Some(1), true);
+        let eth_watch_pool = ConnectionPool::new(Some(1), DbVariant::Master).await;
         task_futures.push(
             start_eth_watch(
                 eth_watch_pool,
-                eth_gateway.clone(),
-                config,
+                query_client.clone(),
+                contracts_config.diamond_proxy_addr,
                 stop_receiver.clone(),
             )
             .await,
@@ -392,18 +406,23 @@ pub async fn initialize_components(
     if components.contains(&Component::EthTxAggregator) {
         let started_at = Instant::now();
         vlog::info!("initializing ETH-TxAggregator");
-        let eth_sender_storage = ConnectionPool::new(Some(1), true);
-        let eth_gateway = PKSigningClient::from_config(config);
-        let nonce = eth_gateway.pending_nonce("eth_sender").await.unwrap();
+        let eth_sender_storage = ConnectionPool::new(Some(1), DbVariant::Master).await;
+        let eth_sender_prover_storage = ConnectionPool::new(Some(1), DbVariant::Prover).await;
+
+        let eth_sender = ETHSenderConfig::from_env();
+        let eth_client =
+            PKSigningClient::from_config(&eth_sender, &contracts_config, &eth_client_config);
+        let nonce = eth_client.pending_nonce("eth_sender").await.unwrap();
         let eth_tx_aggregator_actor = EthTxAggregator::new(
-            config.eth_sender.sender.clone(),
-            Aggregator::new(config.eth_sender.sender.clone()),
-            config.contracts.validator_timelock_addr,
+            eth_sender.sender.clone(),
+            Aggregator::new(eth_sender.sender.clone()),
+            contracts_config.validator_timelock_addr,
             nonce.as_u64(),
         );
         task_futures.push(tokio::spawn(eth_tx_aggregator_actor.run(
             eth_sender_storage.clone(),
-            eth_gateway.clone(),
+            eth_sender_prover_storage.clone(),
+            eth_client,
             stop_receiver.clone(),
         )));
         vlog::info!("initialized ETH-TxAggregator in {:?}", started_at.elapsed());
@@ -413,33 +432,30 @@ pub async fn initialize_components(
     if components.contains(&Component::EthTxManager) {
         let started_at = Instant::now();
         vlog::info!("initializing ETH-TxManager");
-        let eth_sender_storage = ConnectionPool::new(Some(1), true);
-        let eth_gateway = PKSigningClient::from_config(config);
-        let gas_adjuster = Arc::new(
-            GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster)
-                .await
-                .unwrap(),
-        );
+        let eth_sender_storage = ConnectionPool::new(Some(1), DbVariant::Master).await;
+        let eth_sender = ETHSenderConfig::from_env();
+        let eth_client =
+            PKSigningClient::from_config(&eth_sender, &contracts_config, &eth_client_config);
         let eth_tx_manager_actor = EthTxManager::new(
-            config.eth_sender.sender.clone(),
-            gas_adjuster.clone(),
-            eth_gateway.clone(),
+            eth_sender.sender,
+            gas_adjuster.get_or_init().await,
+            eth_client,
         );
-        task_futures.extend([
-            tokio::spawn(
-                eth_tx_manager_actor.run(eth_sender_storage.clone(), stop_receiver.clone()),
-            ),
-            tokio::spawn(gas_adjuster.run(stop_receiver.clone())),
-        ]);
+        task_futures.extend([tokio::spawn(
+            eth_tx_manager_actor.run(eth_sender_storage, stop_receiver.clone()),
+        )]);
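[Editor's note: the hunks above replace per-component `GasAdjuster` instances with a shared singleton obtained via `gas_adjuster.get_or_init().await`, so the adjuster is built once and only one background task has to run for it. Below is a minimal, self-contained sketch of such a get-or-init singleton; the type names are hypothetical stand-ins, not the repo's actual implementation.]

```rust
use std::sync::Arc;
use tokio::sync::OnceCell;

// Hypothetical stand-in for the real gas adjuster, which wraps an Ethereum client.
#[derive(Debug)]
struct GasAdjuster;

/// Sketch of the get-or-init pattern behind `GasAdjusterSingleton`: the first
/// caller builds the shared instance, later callers reuse it.
#[derive(Default)]
struct AdjusterSingleton(OnceCell<Arc<GasAdjuster>>);

impl AdjusterSingleton {
    async fn get_or_init(&self) -> Arc<GasAdjuster> {
        self.0
            .get_or_init(|| async {
                // Real code would construct the adjuster from config here.
                Arc::new(GasAdjuster)
            })
            .await
            .clone()
    }
}
```

Because initialization is lazy, a later `run_if_initialized`-style hook can skip spawning the refresh task entirely when no component ever requested the adjuster.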
vlog::info!("initialized ETH-TxManager in {:?}", started_at.elapsed()); metrics::gauge!("server.init.latency", started_at.elapsed(), "stage" => "eth_tx_aggregator"); } if components.contains(&Component::DataFetcher) { let started_at = Instant::now(); + let fetcher_config = FetcherConfig::from_env(); + let eth_network = chain::NetworkConfig::from_env(); vlog::info!("initializing data fetchers"); task_futures.extend(run_data_fetchers( - config, + &fetcher_config, + eth_network.network, connection_pool.clone(), stop_receiver.clone(), )); @@ -449,150 +465,156 @@ pub async fn initialize_components( let store_factory = ObjectStoreFactory::from_env(); add_trees_to_task_futures( + &mut task_futures, + &mut healthchecks, &components, - config, &store_factory, &stop_receiver, - &mut task_futures, - &mut healthchecks, - ); + ) + .await; add_witness_generator_to_task_futures( + &mut task_futures, &components, &connection_pool, + &prover_connection_pool, &store_factory, &stop_receiver, - &mut task_futures, - ); + ) + .await; if components.contains(&Component::Housekeeper) { - let house_keeper_config = HouseKeeperConfig::from_env(); - let l1_batch_metrics_reporter = - L1BatchMetricsReporter::new(house_keeper_config.l1_batch_metrics_reporting_interval_ms); - let gcs_blob_cleaner = GcsBlobCleaner::new( - &store_factory, - house_keeper_config.blob_cleaning_interval_ms, - ); - let gpu_prover_queue = GpuProverQueueMonitor::new( - ProverGroupConfig::from_env().synthesizer_per_gpu, - house_keeper_config.gpu_prover_queue_reporting_interval_ms, - ); - let config = ProverConfigs::from_env().non_gpu; - let prover_job_retry_manager = ProverJobRetryManager::new( - config.max_attempts, - config.proof_generation_timeout(), - house_keeper_config.prover_job_retrying_interval_ms, - ); - let prover_stats_reporter = - ProverStatsReporter::new(house_keeper_config.prover_stats_reporting_interval_ms); - let waiting_to_queued_witness_job_mover = - WaitingToQueuedWitnessJobMover::new(house_keeper_config.witness_job_moving_interval_ms); - let witness_generator_stats_reporter = WitnessGeneratorStatsReporter::new( - house_keeper_config.witness_generator_stats_reporting_interval_ms, - ); - - let witness_generator_metrics = [ - tokio::spawn(witness_generator_stats_reporter.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(gpu_prover_queue.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(gcs_blob_cleaner.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(l1_batch_metrics_reporter.run(ConnectionPool::new(Some(1), true))), - tokio::spawn(prover_stats_reporter.run(ConnectionPool::new(Some(1), true))), - tokio::spawn( - waiting_to_queued_witness_job_mover.run(ConnectionPool::new(Some(1), true)), - ), - tokio::spawn(prover_job_retry_manager.run(ConnectionPool::new(Some(1), true))), - ]; - - task_futures.extend(witness_generator_metrics); + add_house_keeper_to_task_futures(&mut task_futures, &store_factory).await; } // Run healthcheck server for all components. 
     healthchecks.push(Box::new(ConnectionPoolHealthCheck::new(
         replica_connection_pool,
     )));
-    task_futures.push(healthcheck::start_server_thread_detached(
-        config.api.healthcheck.bind_addr(),
-        healthchecks,
+
+    let healtcheck_api_config = HealthCheckConfig::from_env();
+    let health_check_handle =
+        healthcheck::start_server_thread_detached(healtcheck_api_config.bind_addr(), healthchecks);
+
+    if let Some(task) = gas_adjuster.run_if_initialized(stop_receiver.clone()) {
+        task_futures.push(task);
+    }
+
+    Ok((task_futures, stop_sender, cb_receiver, health_check_handle))
+}
+
+async fn add_state_keeper_to_task_futures<E: L1GasPriceProvider + Send + Sync + 'static>(
+    task_futures: &mut Vec<JoinHandle<()>>,
+    contracts_config: &ContractsConfig,
+    state_keeper_config: StateKeeperConfig,
+    db_config: &DBConfig,
+    mempool_config: &MempoolConfig,
+    gas_adjuster: Arc<E>,
+    stop_receiver: watch::Receiver<bool>,
+) {
+    let fair_l2_gas_price = state_keeper_config.fair_l2_gas_price;
+    let state_keeper_pool = ConnectionPool::new(Some(1), DbVariant::Master).await;
+    let next_priority_id = state_keeper_pool
+        .access_storage()
+        .await
+        .transactions_dal()
+        .next_priority_id()
+        .await;
+    let mempool = MempoolGuard::new(next_priority_id, mempool_config.capacity);
+
+    let miniblock_sealer_pool = ConnectionPool::new(Some(1), DbVariant::Master).await;
+    let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new(
+        miniblock_sealer_pool,
+        state_keeper_config.miniblock_seal_queue_capacity,
+    );
+    task_futures.push(tokio::spawn(miniblock_sealer.run()));
+
+    let state_keeper = create_state_keeper(
+        contracts_config,
+        state_keeper_config,
+        db_config,
+        mempool_config,
+        state_keeper_pool,
+        mempool.clone(),
+        gas_adjuster.clone(),
+        miniblock_sealer_handle,
+        stop_receiver.clone(),
+    )
+    .await;
+    task_futures.push(tokio::spawn(state_keeper.run()));
+
+    let mempool_fetcher_pool = ConnectionPool::new(Some(1), DbVariant::Master).await;
+    let mempool_fetcher = MempoolFetcher::new(mempool, gas_adjuster, mempool_config);
+    let mempool_fetcher_handle = tokio::spawn(mempool_fetcher.run(
+        mempool_fetcher_pool,
+        mempool_config.remove_stuck_txs,
+        mempool_config.stuck_tx_timeout(),
+        fair_l2_gas_price,
         stop_receiver,
     ));
-
-    Ok((task_futures, stop_sender, cb_receiver))
+    task_futures.push(mempool_fetcher_handle);
 }
 
-fn add_trees_to_task_futures(
+async fn add_trees_to_task_futures(
+    task_futures: &mut Vec<JoinHandle<()>>,
+    healthchecks: &mut Vec<Box<dyn CheckHealth>>,
     components: &[Component],
-    config: &ZkSyncConfig,
     store_factory: &ObjectStoreFactory,
     stop_receiver: &watch::Receiver<bool>,
-    task_futures: &mut Vec<JoinHandle<()>>,
-    healthchecks: &mut Vec<Box<dyn CheckHealth>>,
 ) {
-    const COMPONENTS_TO_MODES: &[(Component, bool, TreeImplementation)] = &[
-        (Component::Tree, true, TreeImplementation::Old),
-        (Component::TreeNew, true, TreeImplementation::New),
-        (Component::TreeLightweight, false, TreeImplementation::Old),
-        (
-            Component::TreeLightweightNew,
-            false,
-            TreeImplementation::New,
-        ),
-    ];
+    let db_config = DBConfig::from_env();
+    let operation_config = OperationsManagerConfig::from_env();
+    const COMPONENTS_TO_MODES: &[(Component, bool)] =
+        &[(Component::Tree, true), (Component::TreeLightweight, false)];
 
     if components.contains(&Component::TreeBackup) {
         panic!("Tree backup mode is disabled");
     }
-    if components.contains(&Component::TreeNew)
-        && components.contains(&Component::TreeLightweightNew)
-    {
+    if components.contains(&Component::Tree) && components.contains(&Component::TreeLightweight) {
         panic!(
-            "Cannot start a node with a new tree in both full and lightweight modes. \
\ + "Cannot start a node with a Merkle tree in both full and lightweight modes. \ Since the storage layout is mode-independent, choose either of modes and run \ the node with it." ); } - for &(component, is_full, implementation) in COMPONENTS_TO_MODES { + for &(component, is_full) in COMPONENTS_TO_MODES { if components.contains(&component) { - let store_factory = is_full.then_some(store_factory); + let mode = if is_full { + MetadataCalculatorModeConfig::Full { store_factory } + } else { + MetadataCalculatorModeConfig::Lightweight + }; let (future, tree_health_check) = - run_tree(config, store_factory, stop_receiver.clone(), implementation); + run_tree(&db_config, &operation_config, mode, stop_receiver.clone()).await; task_futures.push(future); healthchecks.push(Box::new(tree_health_check)); } } } -fn run_tree( - config: &ZkSyncConfig, - store_factory: Option<&ObjectStoreFactory>, +async fn run_tree( + config: &DBConfig, + operation_manager: &OperationsManagerConfig, + mode: MetadataCalculatorModeConfig<'_>, stop_receiver: watch::Receiver, - implementation: TreeImplementation, ) -> (JoinHandle<()>, TreeHealthCheck) { let started_at = Instant::now(); - vlog::info!( - "initializing Merkle tree with {:?} implementation in {} mode", - implementation, - if store_factory.is_some() { - "full" - } else { - "lightweight" - } - ); - - let metadata_calculator = if let Some(factory) = store_factory { - MetadataCalculator::full(config, factory, implementation) + let mode_str = if matches!(mode, MetadataCalculatorModeConfig::Full { .. }) { + "full" } else { - MetadataCalculator::lightweight(config, implementation) + "lightweight" }; + vlog::info!("Initializing Merkle tree in {mode_str} mode"); + + let config = MetadataCalculatorConfig::for_main_node(config, operation_manager, mode); + let metadata_calculator = MetadataCalculator::new(&config).await; let tree_health_check = metadata_calculator.tree_health_check(); let tree_tag = metadata_calculator.tree_tag(); - let future = tokio::task::spawn_blocking(|| { - let pool = ConnectionPool::new(Some(1), true); - metadata_calculator.run(&pool, stop_receiver); - }); + let pool = ConnectionPool::new(Some(1), DbVariant::Master).await; + let prover_pool = ConnectionPool::new(Some(1), DbVariant::Prover).await; + let future = tokio::spawn(metadata_calculator.run(pool, prover_pool, stop_receiver)); vlog::info!( - "initialized `{}` tree in {:?}", - tree_tag, + "Initialized `{tree_tag}` tree in {:?}", started_at.elapsed() ); metrics::gauge!( @@ -604,12 +626,13 @@ fn run_tree( (future, tree_health_check) } -fn add_witness_generator_to_task_futures( +async fn add_witness_generator_to_task_futures( + task_futures: &mut Vec>, components: &[Component], connection_pool: &ConnectionPool, + prover_connection_pool: &ConnectionPool, store_factory: &ObjectStoreFactory, stop_receiver: &watch::Receiver, - task_futures: &mut Vec>, ) { // We don't want witness generator to run on local nodes, as it's CPU heavy. 
if std::env::var("ZKSYNC_LOCAL_SETUP") == Ok("true".to_owned()) { @@ -627,170 +650,279 @@ fn add_witness_generator_to_task_futures( for (batch_size, component_type) in generator_params { let started_at = Instant::now(); vlog::info!( - "initializing the {:?} witness generator, batch size: {:?}", - component_type, - batch_size + "initializing the {component_type:?} witness generator, batch size: {batch_size:?}" ); let config = WitnessGeneratorConfig::from_env(); let task = match component_type { AggregationRound::BasicCircuits => { - let witness_generator = BasicWitnessGenerator::new(config, store_factory); - tokio::spawn(witness_generator.run( + let witness_generator = BasicWitnessGenerator::new( + config, + store_factory, connection_pool.clone(), - stop_receiver.clone(), - batch_size, - )) + prover_connection_pool.clone(), + ) + .await; + tokio::spawn(witness_generator.run(stop_receiver.clone(), batch_size)) } AggregationRound::LeafAggregation => { - let witness_generator = LeafAggregationWitnessGenerator::new(config, store_factory); - tokio::spawn(witness_generator.run( + let witness_generator = LeafAggregationWitnessGenerator::new( + config, + store_factory, connection_pool.clone(), - stop_receiver.clone(), - batch_size, - )) + prover_connection_pool.clone(), + ) + .await; + tokio::spawn(witness_generator.run(stop_receiver.clone(), batch_size)) } AggregationRound::NodeAggregation => { - let witness_generator = NodeAggregationWitnessGenerator::new(config, store_factory); - tokio::spawn(witness_generator.run( + let witness_generator = NodeAggregationWitnessGenerator::new( + config, + store_factory, connection_pool.clone(), - stop_receiver.clone(), - batch_size, - )) + prover_connection_pool.clone(), + ) + .await; + tokio::spawn(witness_generator.run(stop_receiver.clone(), batch_size)) } AggregationRound::Scheduler => { - let witness_generator = SchedulerWitnessGenerator::new(config, store_factory); - tokio::spawn(witness_generator.run( + let witness_generator = SchedulerWitnessGenerator::new( + config, + store_factory, connection_pool.clone(), - stop_receiver.clone(), - batch_size, - )) + prover_connection_pool.clone(), + ) + .await; + tokio::spawn(witness_generator.run(stop_receiver.clone(), batch_size)) } }; task_futures.push(task); vlog::info!( - "initialized {:?} witness generator in {:?}", - component_type, + "initialized {component_type:?} witness generator in {:?}", started_at.elapsed() ); metrics::gauge!( "server.init.latency", started_at.elapsed(), - "stage" => format!("witness_generator_{:?}", component_type) + "stage" => format!("witness_generator_{component_type:?}") ); } } -fn build_tx_sender( - config: &ZkSyncConfig, +async fn add_house_keeper_to_task_futures( + task_futures: &mut Vec>, + store_factory: &ObjectStoreFactory, +) { + let house_keeper_config = HouseKeeperConfig::from_env(); + let connection_pool = ConnectionPool::new(Some(1), DbVariant::Replica).await; + let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( + house_keeper_config.l1_batch_metrics_reporting_interval_ms, + connection_pool, + ); + + let prover_connection_pool = ConnectionPool::new( + Some(house_keeper_config.prover_db_pool_size), + DbVariant::Prover, + ) + .await; + let gpu_prover_queue = GpuProverQueueMonitor::new( + ProverGroupConfig::from_env().synthesizer_per_gpu, + house_keeper_config.gpu_prover_queue_reporting_interval_ms, + prover_connection_pool.clone(), + ); + let config = ProverConfigs::from_env().non_gpu; + let prover_job_retry_manager = ProverJobRetryManager::new( + 
+        config.max_attempts,
+        config.proof_generation_timeout(),
+        house_keeper_config.prover_job_retrying_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    let prover_stats_reporter = ProverStatsReporter::new(
+        house_keeper_config.prover_stats_reporting_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    let waiting_to_queued_witness_job_mover = WaitingToQueuedWitnessJobMover::new(
+        house_keeper_config.witness_job_moving_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    let witness_generator_stats_reporter = WitnessGeneratorStatsReporter::new(
+        house_keeper_config.witness_generator_stats_reporting_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    let gcs_blob_cleaner = GcsBlobCleaner::new(
+        store_factory,
+        prover_connection_pool.clone(),
+        house_keeper_config.blob_cleaning_interval_ms,
+    )
+    .await;
+
+    task_futures.push(tokio::spawn(gcs_blob_cleaner.run()));
+    task_futures.push(tokio::spawn(witness_generator_stats_reporter.run()));
+    task_futures.push(tokio::spawn(gpu_prover_queue.run()));
+    task_futures.push(tokio::spawn(l1_batch_metrics_reporter.run()));
+    task_futures.push(tokio::spawn(prover_stats_reporter.run()));
+    task_futures.push(tokio::spawn(waiting_to_queued_witness_job_mover.run()));
+    task_futures.push(tokio::spawn(prover_job_retry_manager.run()));
+
+    // All FRI Prover related components are configured below.
+    let fri_prover_config = FriProverConfig::from_env();
+    let fri_prover_job_retry_manager = FriProverJobRetryManager::new(
+        fri_prover_config.max_attempts,
+        fri_prover_config.proof_generation_timeout(),
+        house_keeper_config.fri_prover_job_retrying_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    task_futures.push(tokio::spawn(fri_prover_job_retry_manager.run()));
+
+    let fri_witness_gen_config = FriWitnessGeneratorConfig::from_env();
+    let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new(
+        fri_witness_gen_config.max_attempts,
+        fri_witness_gen_config.witness_generation_timeout(),
+        house_keeper_config.fri_witness_generator_job_retrying_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    task_futures.push(tokio::spawn(fri_witness_gen_job_retry_manager.run()));
+
+    let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new(
+        house_keeper_config.fri_witness_job_moving_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    task_futures.push(tokio::spawn(waiting_to_queued_fri_witness_job_mover.run()));
+
+    let scheduler_circuit_queuer = SchedulerCircuitQueuer::new(
+        house_keeper_config.fri_witness_job_moving_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    task_futures.push(tokio::spawn(scheduler_circuit_queuer.run()));
+
+    let fri_witness_generator_stats_reporter = FriWitnessGeneratorStatsReporter::new(
+        prover_connection_pool.clone(),
+        house_keeper_config.witness_generator_stats_reporting_interval_ms,
+    );
+    task_futures.push(tokio::spawn(fri_witness_generator_stats_reporter.run()));
+
+    let fri_prover_stats_reporter = FriProverStatsReporter::new(
+        house_keeper_config.fri_prover_stats_reporting_interval_ms,
+        prover_connection_pool.clone(),
+    );
+    task_futures.push(tokio::spawn(fri_prover_stats_reporter.run()));
+}
+
+async fn build_tx_sender<G: L1GasPriceProvider>(
+    tx_sender_config: &TxSenderConfig,
+    web3_json_config: &Web3JsonRpcConfig,
+    state_keeper_config: &StateKeeperConfig,
     replica_pool: ConnectionPool,
     master_pool: ConnectionPool,
     l1_gas_price_provider: Arc<G>,
+    factory_deps_cache: FactoryDepsCache,
 ) -> TxSender<G> {
-    let mut tx_sender_builder = TxSenderBuilder::new(config.clone().into(), replica_pool)
+    let mut tx_sender_builder = TxSenderBuilder::new(tx_sender_config.clone(), replica_pool)
         .with_main_connection_pool(master_pool)
-        .with_state_keeper_config(config.chain.state_keeper.clone());
+        .with_state_keeper_config(state_keeper_config.clone());
 
     // Add rate limiter if enabled.
-    if let Some(transactions_per_sec_limit) = config.api.web3_json_rpc.transactions_per_sec_limit {
+    if let Some(transactions_per_sec_limit) = web3_json_config.transactions_per_sec_limit {
         tx_sender_builder = tx_sender_builder.with_rate_limiter(transactions_per_sec_limit);
     };
 
-    tx_sender_builder.build(
-        l1_gas_price_provider,
-        config.chain.state_keeper.default_aa_hash,
-    )
+    let vm_concurrency_limiter = VmConcurrencyLimiter::new(web3_json_config.vm_concurrency_limit);
+
+    tx_sender_builder
+        .build(
+            l1_gas_price_provider,
+            tx_sender_config.default_aa,
+            Arc::new(vm_concurrency_limiter),
+            factory_deps_cache,
+        )
+        .await
 }
 
-async fn run_http_api(
-    config: &ZkSyncConfig,
+#[allow(clippy::too_many_arguments)]
+async fn run_http_api<G: L1GasPriceProvider + Send + Sync + 'static>(
+    tx_sender_config: &TxSenderConfig,
+    state_keeper_config: &StateKeeperConfig,
+    internal_api: &InternalApiConfig,
+    api_config: &ApiConfig,
     master_connection_pool: ConnectionPool,
     replica_connection_pool: ConnectionPool,
     stop_receiver: watch::Receiver<bool>,
-) -> Vec<JoinHandle<()>> {
-    let eth_gateway = PKSigningClient::from_config(config);
-    let gas_adjuster = Arc::new(
-        GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster)
-            .await
-            .unwrap(),
-    );
-    let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new(
-        config.chain.state_keeper.max_l1_gas_price(),
-        gas_adjuster.clone(),
-    ));
-
+    gas_adjuster: Arc<G>,
+    with_debug_namespace: bool,
+    factory_deps_cache: FactoryDepsCache,
+) -> (Vec<JoinHandle<()>>, ApiHealthCheck) {
     let tx_sender = build_tx_sender(
-        config,
+        tx_sender_config,
+        &api_config.web3_json_rpc,
+        state_keeper_config,
         replica_connection_pool.clone(),
         master_connection_pool.clone(),
-        bounded_gas_adjuster,
-    );
-
-    let mut handles = {
-        let mut builder =
-            web3::ApiBuilder::jsonrpc_backend(config.clone().into(), replica_connection_pool)
-                .http(config.api.web3_json_rpc.http_port)
-                .with_filter_limit(config.api.web3_json_rpc.filters_limit())
-                .with_threads(config.api.web3_json_rpc.threads_per_server as usize)
-                .with_tx_sender(tx_sender);
-
-        if config.chain.state_keeper.save_call_traces {
-            builder = builder.enable_debug_namespace(
-                config.chain.state_keeper.base_system_contracts_hashes(),
-                config.chain.state_keeper.fair_l2_gas_price,
-                config.api.web3_json_rpc.vm_execution_cache_misses_limit,
-            )
-        }
+        gas_adjuster,
+        factory_deps_cache.clone(),
+    )
+    .await;
 
-        builder.build(stop_receiver.clone())
-    };
+    let mut builder =
+        web3::ApiBuilder::jsonrpsee_backend(internal_api.clone(), replica_connection_pool)
+            .http(api_config.web3_json_rpc.http_port)
+            .with_filter_limit(api_config.web3_json_rpc.filters_limit())
+            .with_threads(api_config.web3_json_rpc.http_server_threads())
+            .with_tx_sender(tx_sender);
+
+    if with_debug_namespace {
+        builder = builder.enable_debug_namespace(
+            BaseSystemContractsHashes {
+                bootloader: tx_sender_config.bootloader,
+                default_aa: tx_sender_config.default_aa,
+            },
+            tx_sender_config.fair_l2_gas_price,
+            api_config.web3_json_rpc.vm_execution_cache_misses_limit,
+        )
+    }
 
-    handles.push(tokio::spawn(gas_adjuster.run(stop_receiver)));
-    handles
+    builder.build(stop_receiver.clone()).await
 }
 
-async fn run_ws_api(
-    config: &ZkSyncConfig,
+#[allow(clippy::too_many_arguments)]
+async fn run_ws_api<G: L1GasPriceProvider + Send + Sync + 'static>(
+    tx_sender_config: &TxSenderConfig,
+    state_keeper_config: &StateKeeperConfig,
+    internal_api: &InternalApiConfig,
+    api_config: &ApiConfig,
+    gas_adjuster: Arc<G>,
     master_connection_pool: ConnectionPool,
     replica_connection_pool: ConnectionPool,
     stop_receiver: watch::Receiver<bool>,
-) -> Vec<JoinHandle<()>> {
-    let eth_gateway = PKSigningClient::from_config(config);
-    let gas_adjuster = Arc::new(
-        GasAdjuster::new(eth_gateway.clone(), config.eth_sender.gas_adjuster)
-            .await
-            .unwrap(),
-    );
-
-    let bounded_gas_adjuster = Arc::new(BoundedGasAdjuster::new(
-        config.chain.state_keeper.max_l1_gas_price(),
-        gas_adjuster.clone(),
-    ));
-
+    factory_deps_cache: FactoryDepsCache,
+) -> (Vec<JoinHandle<()>>, ApiHealthCheck) {
     let tx_sender = build_tx_sender(
-        config,
+        tx_sender_config,
+        &api_config.web3_json_rpc,
+        state_keeper_config,
         replica_connection_pool.clone(),
         master_connection_pool.clone(),
-        bounded_gas_adjuster,
-    );
+        gas_adjuster,
+        factory_deps_cache.clone(),
+    )
+    .await;
 
-    let mut tasks =
-        web3::ApiBuilder::jsonrpc_backend(config.clone().into(), replica_connection_pool)
-            .ws(config.api.web3_json_rpc.ws_port)
-            .with_filter_limit(config.api.web3_json_rpc.filters_limit())
-            .with_subscriptions_limit(config.api.web3_json_rpc.subscriptions_limit())
-            .with_polling_interval(config.api.web3_json_rpc.pubsub_interval())
-            .with_tx_sender(tx_sender)
-            .build(stop_receiver.clone());
-
-    tasks.push(tokio::spawn(gas_adjuster.run(stop_receiver)));
-    tasks
+    web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool)
+        .ws(api_config.web3_json_rpc.ws_port)
+        .with_filter_limit(api_config.web3_json_rpc.filters_limit())
+        .with_subscriptions_limit(api_config.web3_json_rpc.subscriptions_limit())
+        .with_polling_interval(api_config.web3_json_rpc.pubsub_interval())
+        .with_threads(api_config.web3_json_rpc.ws_server_threads())
+        .with_tx_sender(tx_sender)
+        .build(stop_receiver.clone())
+        .await
 }
 
-fn circuit_breakers_for_components(
+async fn circuit_breakers_for_components(
     components: &[Component],
-    config: &ZkSyncConfig,
+    web3_url: &str,
+    circuit_breaker_config: &CircuitBreakerConfig,
+    main_contract: H160,
 ) -> Vec<Box<dyn CircuitBreaker>> {
     let mut circuit_breakers: Vec<Box<dyn CircuitBreaker>> = Vec::new();
 
@@ -801,23 +933,21 @@ fn circuit_breakers_for_components(
         )
     }) {
         circuit_breakers.push(Box::new(FailedL1TransactionChecker {
-            pool: ConnectionPool::new(Some(1), false),
+            pool: ConnectionPool::new(Some(1), DbVariant::Replica).await,
        }));
     }
 
     if components.iter().any(|c| {
         matches!(
             c,
-            Component::EthTxAggregator
-                | Component::EthTxManager
-                | Component::Tree
-                | Component::TreeBackup
+            Component::EthTxAggregator | Component::EthTxManager | Component::TreeBackup
         )
     }) {
-        let eth_client = PKSigningClient::from_config(config);
+        let eth_client = QueryClient::new(web3_url).unwrap();
         circuit_breakers.push(Box::new(VksChecker::new(
-            &config.chain.circuit_breaker,
+            circuit_breaker_config,
             eth_client,
+            main_contract,
         )));
     }
 
@@ -825,39 +955,23 @@ fn circuit_breakers_for_components(
         .iter()
         .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager))
     {
-        let eth_client = PKSigningClient::from_config(config);
+        let eth_client = QueryClient::new(web3_url).unwrap();
         circuit_breakers.push(Box::new(FacetSelectorsChecker::new(
-            &config.chain.circuit_breaker,
+            circuit_breaker_config,
             eth_client,
+            main_contract,
        )));
     }
 
     circuit_breakers
 }
 
-pub fn block_on<F: Future + Send + 'static>(future: F) -> F::Output
-where
-    F::Output: Send,
-{
-    std::thread::spawn(move || {
-        let runtime = Builder::new_current_thread()
-            .enable_all()
-            .build()
-            .expect("tokio runtime creation failed");
-        runtime.block_on(future)
-    })
-    .join()
-    .unwrap()
-}
-
 #[tokio::test]
 async fn test_house_keeper_components_get_added() {
-    let config = ZkSyncConfig::from_env();
-    let (core_task_handles, _, _) =
-        initialize_components(&config, vec![Component::Housekeeper], false)
-            .await
-            .unwrap();
-    // circuit-breaker, prometheus-exporter, healthcheck components are run, irrespective of other components.
-    let always_running_component_count = 3;
-    assert_eq!(7, core_task_handles.len() - always_running_component_count);
+    let (core_task_handles, _, _, _) = initialize_components(vec![Component::Housekeeper], false)
+        .await
+        .unwrap();
+    // circuit-breaker, prometheus-exporter components are run, irrespective of other components.
+    let always_running_component_count = 2;
+    assert_eq!(13, core_task_handles.len() - always_running_component_count);
 }
diff --git a/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs b/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs
index 32feec5fafd2..751495156c9f 100644
--- a/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/healthcheck.rs
@@ -1,3 +1,4 @@
+use async_trait::async_trait;
 use tokio::sync::watch;
 
 use zksync_health_check::{CheckHealth, CheckHealthStatus};
@@ -24,8 +25,9 @@ impl TreeHealthCheck {
     }
 }
 
+#[async_trait]
 impl CheckHealth for TreeHealthCheck {
-    fn check_health(&self) -> CheckHealthStatus {
+    async fn check_health(&self) -> CheckHealthStatus {
         match *self.receiver.borrow() {
             MetadataCalculatorStatus::Ready => CheckHealthStatus::Ready,
             MetadataCalculatorStatus::NotReady => CheckHealthStatus::NotReady(format!(
diff --git a/core/bin/zksync_core/src/metadata_calculator/helpers.rs b/core/bin/zksync_core/src/metadata_calculator/helpers.rs
index b7eec35dd926..5eaa20cbf649 100644
--- a/core/bin/zksync_core/src/metadata_calculator/helpers.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/helpers.rs
@@ -1,100 +1,96 @@
 //! Various helpers for the metadata calculator.
 
 #[cfg(test)]
-use std::sync::mpsc;
-use std::{collections::BTreeMap, thread, time::Duration};
+use tokio::sync::mpsc;
+
+use std::{collections::BTreeMap, future::Future, mem, time::Duration};
 
 use zksync_dal::StorageProcessor;
-use zksync_merkle_tree::{TreeMetadata, TreeMode, ZkSyncTree as OldTree};
-use zksync_merkle_tree2::domain::{TreeMetadata as NewTreeMetadata, ZkSyncTree as NewTree};
+use zksync_merkle_tree::domain::{TreeMetadata, ZkSyncTree};
 use zksync_types::{
-    block::WitnessBlockWithLogs, L1BatchNumber, StorageKey, StorageLog, StorageLogKind,
-    WitnessStorageLog, H256,
+    block::WitnessBlockWithLogs, L1BatchNumber, StorageKey, StorageLog, WitnessStorageLog, H256,
 };
 
 /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`].
-#[derive(Debug)]
-pub(super) enum ZkSyncTree {
-    Old(OldTree),
-    New(NewTree),
-}
+///
+/// Async methods provided by this wrapper are not cancel-safe! This is probably not an issue;
+/// `ZkSyncTree` is only indirectly available via `MetadataCalculator::run()` entrypoint
+/// which consumes `self`. That is, if `MetadataCalculator::run()` is cancelled (which we don't currently do,
+/// at least not explicitly), all `MetadataCalculator` data including `ZkSyncTree` is discarded.
+/// In the unlikely case you get a "`ZkSyncTree` is in inconsistent state" panic,
+/// cancellation is most probably the reason.
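[Editor's note: the doc comment above explains the cancel-safety caveat; it arises because each async method takes the tree out of its slot, runs the blocking work on a `spawn_blocking` thread, and only puts the tree back once that task completes. A minimal, self-contained sketch of that take-then-restore pattern (with a plain `Vec` standing in for the tree) follows; it is illustrative only, not the repo's code.]

```rust
use std::mem;

/// Illustrative analogue of `AsyncTree`: `None` marks the "in flight" state
/// while the inner value is owned by a blocking task.
#[derive(Default)]
struct AsyncCell(Option<Vec<u64>>);

impl AsyncCell {
    async fn sum(&mut self) -> u64 {
        // Take ownership so the value can be moved into the blocking task.
        let cell = mem::take(self);
        let (cell, sum) = tokio::task::spawn_blocking(move || {
            let sum = cell.0.as_ref().expect("cell is empty").iter().sum::<u64>();
            (cell, sum)
        })
        .await
        .unwrap();
        // Move the value back in. If this future is dropped before reaching
        // this line, `self` stays `None`: the "inconsistent state" the doc
        // comment warns about.
        *self = cell;
        sum
    }
}
```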
+#[derive(Debug, Default)]
+pub(super) struct AsyncTree(Option<ZkSyncTree>);
+
+impl AsyncTree {
+    const INCONSISTENT_MSG: &'static str =
+        "`ZkSyncTree` is in inconsistent state, which could occur after one of its blocking futures was cancelled";
+
+    pub fn new(tree: ZkSyncTree) -> Self {
+        Self(Some(tree))
+    }
 
-impl ZkSyncTree {
-    pub fn map_metadata(new_metadata: NewTreeMetadata) -> TreeMetadata {
-        TreeMetadata {
-            root_hash: new_metadata.root_hash,
-            rollup_last_leaf_index: new_metadata.rollup_last_leaf_index,
-            initial_writes: new_metadata.initial_writes,
-            repeated_writes: new_metadata.repeated_writes,
-            witness_input: new_metadata.witness,
-        }
+    fn as_ref(&self) -> &ZkSyncTree {
+        self.0.as_ref().expect(Self::INCONSISTENT_MSG)
+    }
+
+    fn as_mut(&mut self) -> &mut ZkSyncTree {
+        self.0.as_mut().expect(Self::INCONSISTENT_MSG)
     }
 
     pub fn is_empty(&self) -> bool {
-        match self {
-            Self::Old(tree) => tree.is_empty(),
-            Self::New(tree) => tree.is_empty(),
-        }
+        self.as_ref().is_empty()
     }
 
     pub fn block_number(&self) -> u32 {
-        match self {
-            Self::Old(tree) => tree.block_number(),
-            Self::New(tree) => tree.block_number(),
-        }
+        self.as_ref().block_number()
     }
 
     pub fn root_hash(&self) -> H256 {
-        match self {
-            Self::Old(tree) => tree.root_hash(),
-            Self::New(tree) => tree.root_hash(),
-        }
+        self.as_ref().root_hash()
     }
 
-    pub fn process_block(&mut self, block: &[WitnessStorageLog]) -> TreeMetadata {
-        match self {
-            Self::Old(tree) => tree.process_block(block),
-            Self::New(tree) => {
-                tree.reset(); // For compatibility with the old implementation
-                let new_metadata = tree.process_block(block);
-                Self::map_metadata(new_metadata)
-            }
-        }
+    pub async fn process_block(&mut self, block: Vec<WitnessStorageLog>) -> TreeMetadata {
+        let mut tree = mem::take(self);
+        let (tree, metadata) = tokio::task::spawn_blocking(move || {
+            let metadata = tree.as_mut().process_block(&block);
+            (tree, metadata)
+        })
+        .await
+        .unwrap();
+
+        *self = tree;
+        metadata
     }
 
-    pub fn process_blocks<'a>(
+    pub async fn process_blocks(
         &mut self,
-        blocks: impl Iterator<Item = &'a [WitnessStorageLog]>,
+        blocks: Vec<Vec<WitnessStorageLog>>,
     ) -> Vec<TreeMetadata> {
-        match self {
-            Self::Old(tree) => {
-                let mode = tree.mode();
-                let blocks = blocks.map(|logs| Self::filter_block_logs(logs, mode));
-                tree.process_blocks(blocks)
-            }
-            Self::New(tree) => {
-                tree.reset(); // For compatibility with the old implementation
-                blocks
-                    .map(|block| Self::map_metadata(tree.process_block(block)))
-                    .collect()
-            }
-        }
-    }
-
-    fn filter_block_logs(
-        logs: &[WitnessStorageLog],
-        mode: TreeMode,
-    ) -> impl Iterator<Item = &WitnessStorageLog> + '_ {
-        logs.iter().filter(move |log| {
-            matches!(mode, TreeMode::Full) || log.storage_log.kind == StorageLogKind::Write
+        let mut tree = mem::take(self);
+        let (tree, metadata) = tokio::task::spawn_blocking(move || {
+            tree.as_mut().reset(); // For compatibility with the old implementation
+            let metadata = blocks
+                .iter()
+                .map(|block| tree.as_mut().process_block(block))
+                .collect();
+            (tree, metadata)
         })
+        .await
+        .unwrap();
+
+        *self = tree;
+        metadata
     }
 
-    pub fn save(&mut self) {
-        match self {
-            Self::Old(tree) => tree.save().expect("failed saving Merkle tree"),
-            Self::New(tree) => tree.save(),
-        }
+    pub async fn save(&mut self) {
+        let mut tree = mem::take(self);
+        *self = tokio::task::spawn_blocking(|| {
+            tree.as_mut().save();
+            tree
+        })
+        .await
+        .unwrap();
     }
 }
 
@@ -107,7 +103,7 @@ pub(super) struct Delayer {
     // runs out of blocks to process. (Since RocksDB is exclusive, we cannot just create
     // another instance to check these params on the test side without stopping the calc.)
     #[cfg(test)]
-    pub delay_notifier: mpsc::Sender<(u32, H256)>,
+    pub delay_notifier: mpsc::UnboundedSender<(u32, H256)>,
 }
 
 impl Delayer {
@@ -115,48 +111,53 @@ impl Delayer {
         Self {
             delay_interval,
             #[cfg(test)]
-            delay_notifier: mpsc::channel().0,
+            delay_notifier: mpsc::unbounded_channel().0,
         }
     }
 
     #[cfg_attr(not(test), allow(unused))] // `tree` is only used in test mode
-    pub fn wait(&self, tree: &ZkSyncTree) {
+    pub fn wait(&self, tree: &AsyncTree) -> impl Future<Output = ()> {
         #[cfg(test)]
         self.delay_notifier
             .send((tree.block_number(), tree.root_hash()))
            .ok();
-
-        thread::sleep(self.delay_interval);
+        tokio::time::sleep(self.delay_interval)
     }
 }
 
-pub(crate) fn get_logs_for_l1_batch(
+pub(crate) async fn get_logs_for_l1_batch(
     storage: &mut StorageProcessor<'_>,
     l1_batch_number: L1BatchNumber,
 ) -> Option<WitnessBlockWithLogs> {
-    let header = storage.blocks_dal().get_block_header(l1_batch_number)?;
+    let header = storage
+        .blocks_dal()
+        .get_block_header(l1_batch_number)
+        .await?;
 
     // `BTreeMap` is used because tree needs to process slots in lexicographical order.
     let mut storage_logs: BTreeMap<StorageKey, WitnessStorageLog> = BTreeMap::new();
 
     let protective_reads = storage
         .storage_logs_dedup_dal()
-        .get_protective_reads_for_l1_batch(l1_batch_number);
+        .get_protective_reads_for_l1_batch(l1_batch_number)
+        .await;
     let touched_slots = storage
         .storage_logs_dal()
-        .get_touched_slots_for_l1_batch(l1_batch_number);
+        .get_touched_slots_for_l1_batch(l1_batch_number)
+        .await;
 
-    let hashed_keys = protective_reads
+    let hashed_keys: Vec<_> = protective_reads
         .iter()
         .chain(touched_slots.keys())
         .map(StorageKey::hashed_key)
         .collect();
     let previous_values = storage
         .storage_logs_dal()
-        .get_previous_storage_values(hashed_keys, l1_batch_number);
+        .get_previous_storage_values(&hashed_keys, l1_batch_number)
+        .await;
 
     for storage_key in protective_reads {
-        let previous_value = previous_values[&storage_key.hashed_key()];
+        let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default();
 
         // Sanity check: value must not change for slots that require protective reads.
         if let Some(value) = touched_slots.get(&storage_key) {
             assert_eq!(
@@ -175,7 +176,7 @@ pub(crate) fn get_logs_for_l1_batch(
     }
 
     for (storage_key, value) in touched_slots {
-        let previous_value = previous_values[&storage_key.hashed_key()];
+        let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default();
         if previous_value != value {
             storage_logs.insert(
                 storage_key,
diff --git a/core/bin/zksync_core/src/metadata_calculator/metrics.rs b/core/bin/zksync_core/src/metadata_calculator/metrics.rs
index ebf6ae7216dc..75db7accb748 100644
--- a/core/bin/zksync_core/src/metadata_calculator/metrics.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/metrics.rs
@@ -39,13 +39,6 @@ impl TreeUpdateStage {
             start: Instant::now(),
         }
     }
-
-    pub fn run<T>(self, action: impl FnOnce() -> T) -> T {
-        let latency = self.start();
-        let output = action();
-        latency.report();
-        output
-    }
 }
 
 /// Latency metric for a certain stage of the tree update.
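[Editor's note: the removed closure-based `TreeUpdateStage::run` helper cannot span `.await` points, which is why the patch switches callers to explicit `start()`/`report()` pairs (see `reestimate_block_commit_gas` further below). A minimal sketch of such a stage-latency guard, using only the standard library (the real code reports to a metrics histogram instead of printing):]

```rust
use std::time::{Duration, Instant};

/// Sketch of the explicit start/report timing pattern: the guard is created
/// before an async section and consumed after it, so the measured span may
/// contain `.await` points.
struct StageLatency {
    stage: &'static str,
    start: Instant,
}

impl StageLatency {
    fn start(stage: &'static str) -> Self {
        Self { stage, start: Instant::now() }
    }

    fn report(self) -> Duration {
        let elapsed = self.start.elapsed();
        // Printing keeps the sketch self-contained; swap in a histogram in real code.
        println!("stage `{}` completed in {elapsed:?}", self.stage);
        elapsed
    }
}
```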
@@ -58,11 +51,16 @@ pub(super) struct UpdateTreeLatency {
 
 impl UpdateTreeLatency {
     pub fn report(self) {
+        let elapsed = self.start.elapsed();
         metrics::histogram!(
             "server.metadata_calculator.update_tree.latency.stage",
-            self.start.elapsed(),
+            elapsed,
             "stage" => self.stage.as_str()
         );
+        vlog::trace!(
+            "Metadata calculator stage `{stage}` completed in {elapsed:?}",
+            stage = self.stage.as_str()
+        );
     }
 }
 
@@ -74,19 +72,15 @@ impl MetadataCalculator {
         start: Instant,
     ) {
         let mode_tag = mode.as_tag();
-        let tree_implementation = mode.tree_implementation();
-        let tree_tag = tree_implementation.as_tag();
 
         metrics::histogram!(
             "server.metadata_calculator.update_tree.latency",
-            start.elapsed(),
-            "tree" => tree_tag
+            start.elapsed()
         );
         if total_logs > 0 {
             metrics::histogram!(
                 "server.metadata_calculator.update_tree.per_log.latency",
-                start.elapsed().div_f32(total_logs as f32),
-                "tree" => tree_tag
+                start.elapsed().div_f32(total_logs as f32)
             );
         }
 
@@ -95,40 +89,24 @@ impl MetadataCalculator {
             .iter()
             .map(|block| u64::from(block.l1_tx_count))
             .sum();
-        metrics::counter!(
-            "server.processed_txs",
-            total_tx as u64,
-            "stage" => "tree",
-            "tree" => tree_tag
-        );
-        metrics::counter!(
-            "server.processed_l1_txs",
-            total_l1_tx,
-            "stage" => "tree",
-            "tree" => tree_tag
-        );
-        metrics::histogram!(
-            "server.metadata_calculator.log_batch",
-            total_logs as f64,
-            "tree" => tree_tag
-        );
+        metrics::counter!("server.processed_txs", total_tx as u64, "stage" => "tree");
+        metrics::counter!("server.processed_l1_txs", total_l1_tx, "stage" => "tree");
+        metrics::histogram!("server.metadata_calculator.log_batch", total_logs as f64);
         metrics::histogram!(
             "server.metadata_calculator.blocks_batch",
-            block_headers.len() as f64,
-            "tree" => tree_tag
+            block_headers.len() as f64
         );
 
+        let first_block_number = block_headers.first().unwrap().number.0;
         let last_block_number = block_headers.last().unwrap().number.0;
         vlog::info!(
-            "block {:?} processed in {} tree",
-            last_block_number,
-            tree_tag
+            "L1 batches #{:?} processed in tree",
+            first_block_number..=last_block_number
        );
         metrics::gauge!(
             "server.block_number",
             last_block_number as f64,
-            "stage" => format!("tree_{}_mode", mode_tag),
-            "tree" => tree_tag
+            "stage" => format!("tree_{mode_tag}_mode")
         );
 
         let latency =
@@ -136,8 +114,7 @@ impl MetadataCalculator {
         metrics::histogram!(
             "server.block_latency",
             latency as f64,
-            "stage" => format!("tree_{}_mode", mode_tag),
-            "tree" => tree_tag
+            "stage" => format!("tree_{mode_tag}_mode")
         );
     }
 }
diff --git a/core/bin/zksync_core/src/metadata_calculator/mod.rs b/core/bin/zksync_core/src/metadata_calculator/mod.rs
index bb685b25ad5e..edc5dd4f9810 100644
--- a/core/bin/zksync_core/src/metadata_calculator/mod.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/mod.rs
@@ -5,18 +5,11 @@ use tokio::sync::watch;
 
 use std::time::Duration;
 
-use zksync_config::{DBConfig, ZkSyncConfig};
+use zksync_config::configs::chain::OperationsManagerConfig;
+use zksync_config::DBConfig;
 use zksync_dal::{ConnectionPool, StorageProcessor};
-use zksync_merkle_tree::TreeMetadata;
+use zksync_merkle_tree::domain::TreeMetadata;
 use zksync_object_store::ObjectStoreFactory;
-use zksync_storage::{
-    db::Database,
-    rocksdb::{
-        backup::{BackupEngine, BackupEngineOptions, RestoreOptions},
-        Options, DB,
-    },
-    RocksDB,
-};
 use zksync_types::{
     block::L1BatchHeader,
     commitment::{BlockCommitment, BlockMetadata, BlockWithMetadata},
@@ -34,49 +27,79 @@ pub(crate) use self::helpers::get_logs_for_l1_batch;
 
 use self::{helpers::Delayer, metrics::TreeUpdateStage, updater::TreeUpdater};
 
 #[derive(Debug, Copy, Clone)]
-pub enum TreeImplementation {
-    Old,
-    New,
+enum MetadataCalculatorMode {
+    Full,
+    Lightweight,
 }
 
-impl TreeImplementation {
+impl MetadataCalculatorMode {
     fn as_tag(self) -> &'static str {
         match self {
-            Self::Old => "old",
-            Self::New => "new",
+            Self::Full => "full",
+            Self::Lightweight => "lightweight",
         }
     }
 }
 
-#[derive(Debug, Copy, Clone)]
-enum MetadataCalculatorMode {
-    Full(TreeImplementation),
-    Lightweight(TreeImplementation),
+#[derive(Debug, PartialEq)]
+pub enum MetadataCalculatorStatus {
+    Ready,
+    NotReady,
 }
 
-impl MetadataCalculatorMode {
-    fn as_tag(self) -> &'static str {
-        match self {
-            Self::Full(TreeImplementation::Old) => "full",
-            // ^ chosen for backward compatibility
-            Self::Full(TreeImplementation::New) => "full_new",
-            Self::Lightweight(TreeImplementation::Old) => "lightweight",
-            // ^ chosen for backward compatibility
-            Self::Lightweight(TreeImplementation::New) => "lightweight_new",
-        }
-    }
+/// Part of [`MetadataCalculator`] related to its syncing mode.
+#[derive(Debug, Clone, Copy)]
+pub enum MetadataCalculatorModeConfig<'a> {
+    /// In this mode, `MetadataCalculator` computes Merkle tree root hashes and some auxiliary information
+    /// for blocks, but not witness inputs.
+    Lightweight,
+    /// In this mode, `MetadataCalculator` will compute witness inputs for all storage operations
+    /// and put them into the object store as provided by `store_factory` (e.g., GCS).
+    Full {
+        store_factory: &'a ObjectStoreFactory,
+    },
+}
 
-    fn tree_implementation(self) -> TreeImplementation {
-        match self {
-            Self::Full(implementation) | Self::Lightweight(implementation) => implementation,
+impl MetadataCalculatorModeConfig<'_> {
+    fn to_mode(self) -> MetadataCalculatorMode {
+        if matches!(self, Self::Full { .. }) {
+            MetadataCalculatorMode::Full
+        } else {
+            MetadataCalculatorMode::Lightweight
         }
     }
 }
 
-#[derive(Debug, PartialEq)]
-pub enum MetadataCalculatorStatus {
-    Ready,
-    NotReady,
+/// Configuration of [`MetadataCalculator`].
+#[derive(Debug)]
+pub struct MetadataCalculatorConfig<'a> {
+    /// Filesystem path to the RocksDB instance that stores the tree.
+    pub db_path: &'a str,
+    /// Tree syncing mode.
+    pub mode: MetadataCalculatorModeConfig<'a>,
+    /// Interval between polling Postgres for updates if no progress was made by the tree.
+    pub delay_interval: Duration,
+    /// Maximum number of L1 batches to get from Postgres on a single update iteration.
+    pub max_block_batch: usize,
+    /// Sleep interval between tree updates if the tree has made progress. This is only applied
+    /// to the tree in the lightweight mode.
+    pub throttle_interval: Duration,
+}
+
+impl<'a> MetadataCalculatorConfig<'a> {
+    pub(crate) fn for_main_node(
+        db_config: &'a DBConfig,
+        operation_config: &'a OperationsManagerConfig,
+        mode: MetadataCalculatorModeConfig<'a>,
+    ) -> Self {
+        Self {
+            db_path: &db_config.new_merkle_tree_ssd_path,
+            mode,
+            delay_interval: operation_config.delay_interval(),
+            throttle_interval: db_config.new_merkle_tree_throttle_interval(),
+            max_block_batch: db_config.max_block_batch(),
+        }
+    }
 }
 
 #[derive(Debug)]
@@ -88,47 +111,26 @@ pub struct MetadataCalculator {
 }
 
 impl MetadataCalculator {
-    /// Creates a calculator operating in the lightweight sync mode. In this mode, the calculator
-    /// computes Merkle tree root hashes and some auxiliary information for blocks, but not
-    /// witness inputs.
-    pub fn lightweight(config: &ZkSyncConfig, implementation: TreeImplementation) -> Self {
-        let mode = MetadataCalculatorMode::Lightweight(implementation);
-        Self::new(config, None, mode)
-    }
+    /// Creates a calculator with the specified `config`.
+    pub async fn new(config: &MetadataCalculatorConfig<'_>) -> Self {
 
-    /// Creates a calculator operating in the full sync mode. In this mode, the calculator
-    /// will compute witness inputs for all storage operations and put them into the object store
-    /// as provided by `store_factory` (e.g., GCS).
-    pub fn full(
-        config: &ZkSyncConfig,
-        store_factory: &ObjectStoreFactory,
-        implementation: TreeImplementation,
-    ) -> Self {
-        let mode = MetadataCalculatorMode::Full(implementation);
-        Self::new(config, Some(store_factory), mode)
-    }
-
-    fn new(
-        config: &ZkSyncConfig,
-        store_factory: Option<&ObjectStoreFactory>,
-        mode: MetadataCalculatorMode,
-    ) -> Self {
-        use self::TreeImplementation::New;
-
-        let db_path = Self::db_path(&config.db, mode);
-        let db = Self::create_db(db_path);
-        let object_store = store_factory.map(ObjectStoreFactory::create_store);
-        let updater = TreeUpdater::new(mode, db, &config.db, object_store);
-        let delay_interval = config.chain.operations_manager.delay_interval();
-        let throttle_interval = if matches!(mode, MetadataCalculatorMode::Lightweight(New)) {
-            config.db.new_merkle_tree_throttle_interval()
+        let mode = config.mode.to_mode();
+        let object_store = match config.mode {
+            MetadataCalculatorModeConfig::Full { store_factory } => {
+                Some(store_factory.create_store().await)
+            }
+            MetadataCalculatorModeConfig::Lightweight => None,
+        };
+        let updater = TreeUpdater::new(mode, config.db_path, config.max_block_batch, object_store);
+        let throttle_interval = if matches!(mode, MetadataCalculatorMode::Lightweight) {
+            config.throttle_interval
         } else {
             Duration::ZERO
         };
         let (status_sender, _) = watch::channel(MetadataCalculatorStatus::NotReady);
         Self {
             updater,
-            delayer: Delayer::new(delay_interval),
+            delayer: Delayer::new(config.delay_interval),
             throttler: Delayer::new(throttle_interval),
             status_sender,
         }
@@ -145,61 +147,45 @@ impl MetadataCalculator {
         self.updater.mode().as_tag()
     }
 
-    fn db_path(config: &DBConfig, mode: MetadataCalculatorMode) -> &str {
-        use self::TreeImplementation::{New, Old};
-
-        match mode {
-            MetadataCalculatorMode::Full(Old) => config.path(),
-            MetadataCalculatorMode::Lightweight(Old) => config.merkle_tree_fast_ssd_path(),
-            MetadataCalculatorMode::Full(New) | MetadataCalculatorMode::Lightweight(New) => {
-                &config.new_merkle_tree_ssd_path
-            }
-        }
-    }
-
-    fn create_db(path: &str) -> RocksDB {
-        let db = RocksDB::new(Database::MerkleTree, path, true);
-        if cfg!(test) {
-            // We need sync writes for the unit tests to execute reliably. With the default config,
-            // some writes to RocksDB may occur, but not be visible to the test code.
-            db.with_sync_writes()
-        } else {
-            db
-        }
-    }
-
-    pub fn run(self, pool: &ConnectionPool, stop_receiver: watch::Receiver<bool>) {
-        self.updater.loop_updating_tree(
+    pub async fn run(
+        self,
+        pool: ConnectionPool,
+        prover_pool: ConnectionPool,
+        stop_receiver: watch::Receiver<bool>,
+    ) {
+        let update_task = self.updater.loop_updating_tree(
            self.delayer,
             self.throttler,
-            pool,
+            &pool,
+            &prover_pool,
             stop_receiver,
             self.status_sender,
         );
+        update_task.await;
     }
 
     /// This is used to improve L1 gas estimation for the commit operation. The estimations are computed
     /// in the State Keeper, where storage writes aren't yet deduplicated, whereas block metadata
     /// contains deduplicated storage writes.
-    fn reestimate_block_commit_gas(
+    async fn reestimate_block_commit_gas(
         storage: &mut StorageProcessor<'_>,
         block_header: L1BatchHeader,
         metadata: BlockMetadata,
     ) -> BlockWithMetadata {
-        TreeUpdateStage::ReestimateGasCost.run(|| {
-            let unsorted_factory_deps = storage
-                .blocks_dal()
-                .get_l1_batch_factory_deps(block_header.number);
-            let block_with_metadata =
-                BlockWithMetadata::new(block_header, metadata, unsorted_factory_deps);
-            let commit_gas_cost =
-                crate::gas_tracker::commit_gas_count_for_block(&block_with_metadata);
-            storage.blocks_dal().update_predicted_block_commit_gas(
-                block_with_metadata.header.number,
-                commit_gas_cost,
-            );
-            block_with_metadata
-        })
+        let reestimate_gas_cost = TreeUpdateStage::ReestimateGasCost.start();
+        let unsorted_factory_deps = storage
+            .blocks_dal()
+            .get_l1_batch_factory_deps(block_header.number)
+            .await;
+        let block_with_metadata =
+            BlockWithMetadata::new(block_header, metadata, unsorted_factory_deps);
+        let commit_gas_cost = crate::gas_tracker::commit_gas_count_for_block(&block_with_metadata);
+        storage
+            .blocks_dal()
+            .update_predicted_block_commit_gas(block_with_metadata.header.number, commit_gas_cost)
+            .await;
+        reestimate_gas_cost.report();
+        block_with_metadata
     }
 
     fn build_block_metadata(
@@ -218,7 +204,7 @@ impl MetadataCalculator {
             l1_batch_header.base_system_contracts_hashes.default_aa,
         );
         let block_commitment_hash = block_commitment.hash();
-        vlog::trace!("Block commitment {:?}", &block_commitment);
+        vlog::trace!("Block commitment: {block_commitment:?}");
 
         let metadata = BlockMetadata {
             root_hash: merkle_root_hash,
@@ -235,36 +221,7 @@ impl MetadataCalculator {
             pass_through_data_hash: block_commitment_hash.pass_through_data,
         };
 
-        vlog::trace!("Block metadata {:?}", metadata);
+        vlog::trace!("Block metadata: {metadata:?}");
         metadata
     }
-
-    fn _restore_from_backup(db_config: &DBConfig) {
-        let backup_path = db_config.merkle_tree_backup_path();
-        let mut engine = BackupEngine::open(&BackupEngineOptions::default(), backup_path)
-            .expect("failed to initialize restore engine");
-        let rocksdb_path = db_config.path();
-        if let Err(err) = engine.restore_from_latest_backup(
-            rocksdb_path,
-            rocksdb_path,
-            &RestoreOptions::default(),
-        ) {
-            vlog::warn!("can't restore tree from backup {:?}", err);
-        }
-    }
-
-    fn _backup(config: &DBConfig, mode: MetadataCalculatorMode) {
-        let backup_latency = TreeUpdateStage::_Backup.start();
-        let backup_path = config.merkle_tree_backup_path();
-        let mut engine = BackupEngine::open(&BackupEngineOptions::default(), backup_path)
-            .expect("failed to create backup engine");
-        let rocksdb_path = Self::db_path(config, mode);
-        let db = DB::open_for_read_only(&Options::default(), rocksdb_path, false)
-            .expect("failed to open db for backup");
-        engine.create_new_backup(&db).unwrap();
-        engine
-            .purge_old_backups(config.backup_count())
-            .expect("failed to purge old backups");
-        backup_latency.report();
-    }
 }
diff --git a/core/bin/zksync_core/src/metadata_calculator/tests.rs b/core/bin/zksync_core/src/metadata_calculator/tests.rs
index e23fc4d4c317..d2a14053f05c 100644
--- a/core/bin/zksync_core/src/metadata_calculator/tests.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/tests.rs
@@ -1,19 +1,18 @@
 use assert_matches::assert_matches;
 use db_test_macro::db_test;
 use tempfile::TempDir;
-use tokio::sync::watch;
+use tokio::sync::{mpsc, watch};
watch}; use std::{ + future::Future, ops, panic, path::Path, - sync::mpsc, - thread, time::{Duration, Instant}, }; -use crate::genesis::{create_genesis_block, save_genesis_block_metadata}; -use crate::metadata_calculator::{MetadataCalculator, MetadataCalculatorMode, TreeImplementation}; -use zksync_config::ZkSyncConfig; +use zksync_config::configs::chain::{NetworkConfig, OperationsManagerConfig}; + +use zksync_config::DBConfig; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{CheckHealth, CheckHealthStatus}; @@ -27,133 +26,98 @@ use zksync_types::{ }; use zksync_utils::{miniblock_hash, u32_to_h256}; -const RUN_TIMEOUT: Duration = Duration::from_secs(5); +use super::{MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig}; +use crate::genesis::{create_genesis_block, save_genesis_block_metadata}; + +const RUN_TIMEOUT: Duration = Duration::from_secs(15); -fn run_with_timeout(timeout: Duration, action: F) -> T +async fn run_with_timeout(timeout: Duration, action: F) -> T where - T: Send + 'static, - F: FnOnce() -> T + Send + 'static, + F: Future, { - let (termination_sx, termination_rx) = mpsc::channel(); - let join_handle = thread::spawn(move || { - termination_sx.send(action()).ok(); - }); - let output = termination_rx - .recv_timeout(timeout) - .expect("timed out waiting for metadata calculator"); - match join_handle.join() { - Ok(()) => output, - Err(panic_object) => panic::resume_unwind(panic_object), + let timeout_handle = tokio::time::timeout(timeout, action); + match timeout_handle.await { + Ok(res) => res, + Err(_) => panic!("timed out waiting for metadata calculator"), } } -fn test_genesis_creation(pool: &ConnectionPool, implementation: TreeImplementation) { +#[db_test] +async fn genesis_creation(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation); + let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; assert!(calculator.tree_tag().starts_with("full")); - run_calculator(calculator, pool.clone()); - let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation); + run_calculator(calculator, pool.clone(), prover_pool).await; + let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; assert_eq!(calculator.updater.tree().block_number(), 1); } #[db_test] -async fn genesis_creation(pool: ConnectionPool) { - test_genesis_creation(&pool, TreeImplementation::Old); -} - -#[db_test] -async fn genesis_creation_with_new_tree(pool: ConnectionPool) { - test_genesis_creation(&pool, TreeImplementation::New); -} - -fn test_basic_workflow( - pool: &ConnectionPool, - implementation: TreeImplementation, -) -> PrepareBasicCircuitsJob { +async fn basic_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) { let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let (calculator, object_store) = setup_calculator(temp_dir.path(), pool, implementation); - reset_db_state(pool, 1); - run_calculator(calculator, pool.clone()); + let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool).await; + reset_db_state(&pool, 1).await; + run_calculator(calculator, pool.clone(), prover_pool).await; - let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).unwrap(); + let job: PrepareBasicCircuitsJob = object_store.get(L1BatchNumber(1)).await.unwrap(); 
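As a reference for readers skimming this hunk: the object store API is now fully async, so `put`/`get` must be awaited. A hedged, minimal sketch of the round trip the test above relies on, using only calls visible in this patch (`ObjectStoreFactory::mock()`, `create_store().await`, async `put`/`get` keyed by `L1BatchNumber`); the import path for `PrepareBasicCircuitsJob` is an assumption and the snippet is meant to run inside the zksync workspace, not standalone:

use zksync_object_store::ObjectStoreFactory;
use zksync_types::{proofs::PrepareBasicCircuitsJob, L1BatchNumber};

async fn witness_round_trip(job: PrepareBasicCircuitsJob) {
    // The mock factory backs the store with in-memory storage, as in the tests above.
    let store = ObjectStoreFactory::mock().create_store().await;
    // `put` returns the object key under which the value was stored.
    let object_key = store.put(L1BatchNumber(1), &job).await.unwrap();
    // `get` deserializes the value back; the type annotation drives deserialization.
    let restored: PrepareBasicCircuitsJob = store.get(L1BatchNumber(1)).await.unwrap();
    assert_eq!(restored.next_enumeration_index(), job.next_enumeration_index());
    println!("witness inputs stored under `{object_key}`");
}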
     assert!(job.next_enumeration_index() > 0);
     let merkle_paths: Vec<_> = job.clone().into_merkle_paths().collect();
     assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 100);
     // ^ The exact values depend on ops in genesis block
     assert!(merkle_paths.iter().all(|log| log.is_write));
 
-    let (calculator, _) = setup_calculator(temp_dir.path(), pool, implementation);
+    let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await;
     assert_eq!(calculator.updater.tree().block_number(), 2);
-    job
-}
-
-#[db_test]
-async fn basic_workflow(pool: ConnectionPool) {
-    let old_job = test_basic_workflow(&pool, TreeImplementation::Old);
-    let new_job = test_basic_workflow(&pool, TreeImplementation::New);
-    assert_jobs_eq(old_job, new_job);
-}
-
-fn assert_jobs_eq(old_job: PrepareBasicCircuitsJob, new_job: PrepareBasicCircuitsJob) {
-    assert_eq!(
-        old_job.next_enumeration_index(),
-        new_job.next_enumeration_index()
-    );
-    let old_merkle_paths = old_job.into_merkle_paths();
-    let new_merkle_paths = new_job.into_merkle_paths();
-    assert_eq!(old_merkle_paths.len(), new_merkle_paths.len());
-    for (old_path, new_path) in old_merkle_paths.zip(new_merkle_paths) {
-        assert_eq!(old_path, new_path);
-    }
 }
 
 #[db_test]
-async fn status_receiver_has_correct_states(pool: ConnectionPool) {
+async fn status_receiver_has_correct_states(pool: ConnectionPool, prover_pool: ConnectionPool) {
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let (calculator, _) = setup_calculator(temp_dir.path(), &pool, TreeImplementation::Old);
+    let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await;
     let tree_health_check = calculator.tree_health_check();
     assert_matches!(
-        tree_health_check.check_health(),
+        tree_health_check.check_health().await,
         CheckHealthStatus::NotReady(msg) if msg.contains("full")
     );
     let other_tree_health_check = calculator.tree_health_check();
     assert_matches!(
-        other_tree_health_check.check_health(),
+        other_tree_health_check.check_health().await,
         CheckHealthStatus::NotReady(msg) if msg.contains("full")
     );
-    reset_db_state(&pool, 1);
-    run_calculator(calculator, pool);
-    assert_eq!(tree_health_check.check_health(), CheckHealthStatus::Ready);
+    reset_db_state(&pool, 1).await;
+    run_calculator(calculator, pool, prover_pool).await;
     assert_eq!(
-        other_tree_health_check.check_health(),
+        tree_health_check.check_health().await,
+        CheckHealthStatus::Ready
+    );
+    assert_eq!(
+        other_tree_health_check.check_health().await,
         CheckHealthStatus::Ready
     );
 }
 
-fn test_multi_block_workflow(
-    pool: ConnectionPool,
-    implementation: TreeImplementation,
-) -> Box<dyn ObjectStore> {
+#[db_test]
+async fn multi_block_workflow(pool: ConnectionPool, prover_pool: ConnectionPool) {
     // Run all transactions as a single block
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let (calculator, _) = setup_calculator(temp_dir.path(), &pool, implementation);
-    reset_db_state(&pool, 1);
-    let root_hash = run_calculator(calculator, pool.clone());
+    let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await;
+    reset_db_state(&pool, 1).await;
+    let root_hash = run_calculator(calculator, pool.clone(), prover_pool.clone()).await;
 
     // Run the same transactions as multiple blocks
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool, implementation);
-    reset_db_state(&pool, 10);
-    let multi_block_root_hash = run_calculator(calculator, pool);
+    let (calculator, object_store) = setup_calculator(temp_dir.path(), &pool).await;
+    reset_db_state(&pool, 10).await;
+    let multi_block_root_hash = run_calculator(calculator, pool, prover_pool).await;
     assert_eq!(multi_block_root_hash, root_hash);
 
     let mut prev_index = None;
     for block_number in 1..=10 {
         let block_number = L1BatchNumber(block_number);
-        let job: PrepareBasicCircuitsJob = object_store.get(block_number).unwrap();
+        let job: PrepareBasicCircuitsJob = object_store.get(block_number).await.unwrap();
         let next_enumeration_index = job.next_enumeration_index();
         let merkle_paths: Vec<_> = job.into_merkle_paths().collect();
         assert!(!merkle_paths.is_empty() && merkle_paths.len() <= 10);
@@ -167,109 +131,86 @@ fn test_multi_block_workflow(
             .max();
         prev_index = max_leaf_index_in_block.or(prev_index);
     }
-    object_store
-}
-
-#[db_test]
-async fn multi_block_workflow(pool: ConnectionPool) {
-    let old_store = test_multi_block_workflow(pool.clone(), TreeImplementation::Old);
-    let new_store = test_multi_block_workflow(pool, TreeImplementation::New);
-
-    for block_number in 1..=10 {
-        let old_job: PrepareBasicCircuitsJob = old_store.get(L1BatchNumber(block_number)).unwrap();
-        let new_job: PrepareBasicCircuitsJob = new_store.get(L1BatchNumber(block_number)).unwrap();
-        assert_jobs_eq(old_job, new_job);
-    }
-}
-
-fn test_switch_from_old_to_new_tree_without_catchup(pool: ConnectionPool, block_count: usize) {
-    let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-
-    let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old);
-    assert!(calculator.tree_tag().starts_with("lightweight"));
-    reset_db_state(&pool, block_count);
-    let old_root_hash = run_calculator(calculator, pool.clone());
-
-    let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::New);
-    let new_root_hash = run_calculator(calculator, pool);
-    assert_eq!(new_root_hash, old_root_hash);
-}
-
-#[db_test]
-async fn switching_from_old_to_new_tree_without_catchup(pool: ConnectionPool) {
-    test_switch_from_old_to_new_tree_without_catchup(pool, 1);
-}
-
-#[db_test]
-async fn switching_from_old_to_new_tree_in_multiple_blocks_without_catchup(pool: ConnectionPool) {
-    test_switch_from_old_to_new_tree_without_catchup(pool, 10);
 }
 
 #[db_test]
-async fn switching_between_tree_impls_with_additional_blocks(pool: ConnectionPool) {
+async fn running_metadata_calculator_with_additional_blocks(
+    pool: ConnectionPool,
+    prover_pool: ConnectionPool,
+) {
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old);
-    reset_db_state(&pool, 5);
-    run_calculator(calculator, pool.clone());
+    let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await;
+    reset_db_state(&pool, 5).await;
+    run_calculator(calculator, pool.clone(), prover_pool.clone()).await;
 
-    let mut calculator =
-        setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::New);
+    let mut calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await;
     let (stop_sx, stop_rx) = watch::channel(false);
-    let (delay_sx, delay_rx) = mpsc::channel();
+    let (delay_sx, mut delay_rx) = mpsc::unbounded_channel();
     calculator.delayer.delay_notifier = delay_sx;
 
     let calculator_handle = {
         let pool = pool.clone();
-        thread::spawn(move || calculator.run(&pool, stop_rx))
+        let prover_pool = prover_pool.clone();
+        tokio::task::spawn(calculator.run(pool, prover_pool, stop_rx))
     };
     // Wait until the calculator has processed initial blocks.
-    let (block_count, _) = delay_rx
-        .recv_timeout(RUN_TIMEOUT)
-        .expect("metadata calculator timed out processing initial blocks");
+    let (block_count, _) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv())
+        .await
+        .expect("metadata calculator timed out processing initial blocks")
+        .unwrap();
     assert_eq!(block_count, 6);
 
     // Add some new blocks to the storage.
     let new_logs = gen_storage_logs(100..200, 10);
-    extend_db_state(&mut pool.access_storage_blocking(), new_logs);
+    extend_db_state(
+        &mut pool.access_storage_tagged("metadata_calculator").await,
+        new_logs,
+    )
+    .await;
 
     // Wait until these blocks are processed. The calculator may have spurious delays,
     // thus we wait in a loop.
     let updated_root_hash = loop {
-        let (block_count, root_hash) = delay_rx
-            .recv_timeout(RUN_TIMEOUT)
-            .expect("metadata calculator shut down prematurely");
+        let (block_count, root_hash) = tokio::time::timeout(RUN_TIMEOUT, delay_rx.recv())
+            .await
+            .expect("metadata calculator shut down prematurely")
+            .unwrap();
         if block_count == 16 {
             stop_sx.send(true).unwrap(); // Shut down the calculator.
             break root_hash;
         }
     };
-    run_with_timeout(RUN_TIMEOUT, || calculator_handle.join()).unwrap();
-
-    // Switch back to the old implementation. It should process new blocks independently
-    // and result in the same tree root hash.
-    let calculator = setup_lightweight_calculator(temp_dir.path(), &pool, TreeImplementation::Old);
-    let root_hash_for_old_tree = run_calculator(calculator, pool);
-    assert_eq!(root_hash_for_old_tree, updated_root_hash);
+    tokio::time::timeout(RUN_TIMEOUT, calculator_handle)
+        .await
+        .expect("timed out waiting for calculator")
+        .unwrap();
+
+    // Switch to the full tree. It should pick up from the same spot and result in the same tree root hash.
+    let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await;
+    let root_hash_for_full_tree = run_calculator(calculator, pool, prover_pool).await;
+    assert_eq!(root_hash_for_full_tree, updated_root_hash);
 }
 
 #[db_test]
-async fn throttling_new_tree(pool: ConnectionPool) {
+async fn throttling_tree(pool: ConnectionPool, prover_pool: ConnectionPool) {
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let mut config = create_config(temp_dir.path());
-    config.db.new_merkle_tree_throttle_ms = 100;
+    let (mut db_config, operation_config, eth) = create_config(temp_dir.path());
+    db_config.new_merkle_tree_throttle_ms = 100;
     let mut calculator = setup_calculator_with_options(
-        &config,
+        &db_config,
+        &operation_config,
+        &eth,
         &pool,
-        &ObjectStoreFactory::mock(),
-        MetadataCalculatorMode::Lightweight(TreeImplementation::New),
-    );
+        MetadataCalculatorModeConfig::Lightweight,
+    )
+    .await;
-    let (delay_sx, delay_rx) = mpsc::channel();
+    let (delay_sx, mut delay_rx) = mpsc::unbounded_channel();
     calculator.throttler.delay_notifier = delay_sx;
-    reset_db_state(&pool, 5);
+    reset_db_state(&pool, 5).await;
     let start = Instant::now();
-    run_calculator(calculator, pool);
+    run_calculator(calculator, pool, prover_pool).await;
     let elapsed = start.elapsed();
     assert!(elapsed >= Duration::from_millis(100), "{:?}", elapsed);
@@ -279,50 +220,75 @@ async fn throttling_new_tree(pool: ConnectionPool) {
     delay_rx.try_recv().unwrap_err();
 }
 
-fn setup_calculator(
+#[db_test]
+async fn shutting_down_calculator(pool: ConnectionPool, prover_pool: ConnectionPool) {
+    let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
+    let (mut db_config, mut operation_config, eth) = create_config(temp_dir.path());
+    operation_config.delay_interval = 30_000; // ms; chosen to be larger than `RUN_TIMEOUT`
+    db_config.new_merkle_tree_throttle_ms = 30_000;
+
+    let calculator = setup_calculator_with_options(
+        &db_config,
+        &operation_config,
+        &eth,
+        &pool,
+        MetadataCalculatorModeConfig::Lightweight,
+    )
+    .await;
+
+    reset_db_state(&pool, 5).await;
+
+    let (stop_sx, stop_rx) = watch::channel(false);
+    let calculator_task = tokio::spawn(calculator.run(pool, prover_pool, stop_rx));
+    tokio::time::sleep(Duration::from_millis(100)).await;
+    stop_sx.send_replace(true);
+    run_with_timeout(RUN_TIMEOUT, calculator_task)
+        .await
+        .unwrap();
+}
+
+async fn setup_calculator(
     db_path: &Path,
     pool: &ConnectionPool,
-    implementation: TreeImplementation,
 ) -> (MetadataCalculator, Box<dyn ObjectStore>) {
-    let store_factory = ObjectStoreFactory::mock();
-    let config = create_config(db_path);
-    let mode = MetadataCalculatorMode::Full(implementation);
-    let calculator = setup_calculator_with_options(&config, pool, &store_factory, mode);
-    (calculator, store_factory.create_store())
+    let store_factory = &ObjectStoreFactory::mock();
+    let (db_config, operation_manager, eth) = create_config(db_path);
+    let mode = MetadataCalculatorModeConfig::Full { store_factory };
+    let calculator =
+        setup_calculator_with_options(&db_config, &operation_manager, &eth, pool, mode).await;
+    (calculator, store_factory.create_store().await)
 }
 
-fn setup_lightweight_calculator(
-    db_path: &Path,
-    pool: &ConnectionPool,
-    implementation: TreeImplementation,
-) -> MetadataCalculator {
-    let mode = MetadataCalculatorMode::Lightweight(implementation);
-    let config = create_config(db_path);
-    setup_calculator_with_options(&config, pool, &ObjectStoreFactory::mock(), mode)
+async fn setup_lightweight_calculator(db_path: &Path, pool: &ConnectionPool) -> MetadataCalculator {
+    let mode = MetadataCalculatorModeConfig::Lightweight;
+    let (db_config, operation_config, eth) = create_config(db_path);
+    setup_calculator_with_options(&db_config, &operation_config, &eth, pool, mode).await
 }
 
-fn create_config(db_path: &Path) -> ZkSyncConfig {
-    let mut config = ZkSyncConfig::from_env();
-    config.chain.operations_manager.delay_interval = 50; // ms
-    config.db.path = path_to_string(db_path);
-    config.db.merkle_tree_fast_ssd_path = path_to_string(&db_path.join("old"));
-    config.db.new_merkle_tree_ssd_path = path_to_string(&db_path.join("new"));
-    config.db.backup_interval_ms = 0;
-    config
+fn create_config(db_path: &Path) -> (DBConfig, OperationsManagerConfig, NetworkConfig) {
+    let mut db_config = DBConfig::from_env();
+    let mut operation_config = OperationsManagerConfig::from_env();
+    let eth_config = NetworkConfig::from_env();
+    operation_config.delay_interval = 50; // ms
+    db_config.new_merkle_tree_ssd_path = path_to_string(&db_path.join("new"));
+    db_config.backup_interval_ms = 0;
+    (db_config, operation_config, eth_config)
 }
 
-fn setup_calculator_with_options(
-    config: &ZkSyncConfig,
+async fn setup_calculator_with_options(
+    db_config: &DBConfig,
+    operation_config: &OperationsManagerConfig,
+    eth: &NetworkConfig,
     pool: &ConnectionPool,
-    store_factory: &ObjectStoreFactory,
-    mode: MetadataCalculatorMode,
+    mode: MetadataCalculatorModeConfig<'_>,
 ) -> MetadataCalculator {
-    let store_factory = matches!(mode, MetadataCalculatorMode::Full(_)).then_some(store_factory);
-    let metadata_calculator = MetadataCalculator::new(config, store_factory, mode);
+    let calculator_config =
+        MetadataCalculatorConfig::for_main_node(db_config, operation_config, mode);
+    let metadata_calculator = MetadataCalculator::new(&calculator_config).await;
 
-    let mut storage = pool.access_storage_blocking();
-    if storage.blocks_dal().is_genesis_needed() {
-        let chain_id = L2ChainId(config.chain.eth.zksync_network_id);
+    let mut storage = pool.access_storage_tagged("metadata_calculator").await;
+    if storage.blocks_dal().is_genesis_needed().await {
+        let chain_id = L2ChainId(eth.zksync_network_id);
         let base_system_contracts = BaseSystemContracts::load_from_disk();
         let block_commitment = BlockCommitment::new(
             vec![],
@@ -335,13 +301,14 @@ fn setup_calculator_with_options(
         );
 
         let fee_address = Address::repeat_byte(0x01);
-        create_genesis_block(&mut storage, fee_address, chain_id, base_system_contracts);
+        create_genesis_block(&mut storage, fee_address, chain_id, base_system_contracts).await;
         save_genesis_block_metadata(
             &mut storage,
             &block_commitment,
             metadata_calculator.updater.tree().root_hash(),
             1,
-        );
+        )
+        .await;
     }
     metadata_calculator
 }
@@ -350,42 +317,54 @@ fn path_to_string(path: &Path) -> String {
     path.to_str().unwrap().to_owned()
 }
 
-fn run_calculator(mut calculator: MetadataCalculator, pool: ConnectionPool) -> H256 {
+async fn run_calculator(
+    mut calculator: MetadataCalculator,
+    pool: ConnectionPool,
+    prover_pool: ConnectionPool,
+) -> H256 {
     let (stop_sx, stop_rx) = watch::channel(false);
-    let (delay_sx, delay_rx) = mpsc::channel();
+    let (delay_sx, mut delay_rx) = mpsc::unbounded_channel();
     calculator.delayer.delay_notifier = delay_sx;
-    let delayer_handle = thread::spawn(move || {
+    let delayer_handle = tokio::spawn(async move {
        // Wait until the calculator has processed all initially available blocks,
        // then stop it via signal.
         let (_, root_hash) = delay_rx
             .recv()
+            .await
             .expect("metadata calculator shut down prematurely");
         stop_sx.send(true).unwrap();
         root_hash
     });
 
-    run_with_timeout(RUN_TIMEOUT, move || calculator.run(&pool, stop_rx));
-    delayer_handle.join().unwrap()
+    run_with_timeout(RUN_TIMEOUT, calculator.run(pool, prover_pool, stop_rx)).await;
+    delayer_handle.await.unwrap()
 }
 
-fn reset_db_state(pool: &ConnectionPool, num_blocks: usize) {
-    let mut storage = pool.access_storage_blocking();
+async fn reset_db_state(pool: &ConnectionPool, num_blocks: usize) {
+    let mut storage = pool.access_storage_tagged("metadata_calculator").await;
     // Drops all blocks (except the block with number = 0) and their storage logs.
     storage
         .storage_logs_dal()
-        .rollback_storage_logs(MiniblockNumber(0));
-    storage.blocks_dal().delete_miniblocks(MiniblockNumber(0));
-    storage.blocks_dal().delete_l1_batches(L1BatchNumber(0));
+        .rollback_storage_logs(MiniblockNumber(0))
+        .await;
+    storage
+        .blocks_dal()
+        .delete_miniblocks(MiniblockNumber(0))
+        .await;
+    storage
+        .blocks_dal()
+        .delete_l1_batches(L1BatchNumber(0))
+        .await;
 
     let logs = gen_storage_logs(0..100, num_blocks);
-    extend_db_state(&mut storage, logs);
+    extend_db_state(&mut storage, logs).await;
 }
 
-fn extend_db_state(
+async fn extend_db_state(
     storage: &mut StorageProcessor<'_>,
     new_logs: impl IntoIterator<Item = Vec<StorageLog>>,
 ) {
-    let next_block = storage.blocks_dal().get_sealed_block_number().0 + 1;
+    let next_block = storage.blocks_dal().get_sealed_block_number().await.0 + 1;
     let base_system_contracts = BaseSystemContracts::load_from_disk();
 
     for (idx, block_logs) in (next_block..).zip(new_logs) {
@@ -414,14 +393,20 @@ fn extend_db_state(
 
         storage
             .blocks_dal()
-            .insert_l1_batch(header, BlockGasCount::default());
-        storage.blocks_dal().insert_miniblock(miniblock_header);
+            .insert_l1_batch(&header, BlockGasCount::default())
+            .await;
+        storage
+            .blocks_dal()
+            .insert_miniblock(&miniblock_header)
+            .await;
         storage
             .storage_logs_dal()
-            .insert_storage_logs(miniblock_number, &[(H256::zero(), block_logs)]);
+            .insert_storage_logs(miniblock_number, &[(H256::zero(), block_logs)])
+            .await;
         storage
             .blocks_dal()
-            .mark_miniblocks_as_executed_in_l1_batch(block_number);
+            .mark_miniblocks_as_executed_in_l1_batch(block_number)
+            .await;
     }
 }
diff --git a/core/bin/zksync_core/src/metadata_calculator/updater.rs b/core/bin/zksync_core/src/metadata_calculator/updater.rs
index d7917c69c852..8b02e85de754 100644
--- a/core/bin/zksync_core/src/metadata_calculator/updater.rs
+++ b/core/bin/zksync_core/src/metadata_calculator/updater.rs
@@ -4,25 +4,23 @@ use tokio::sync::watch;
 
 use std::time::Instant;
 
-use zksync_config::DBConfig;
 use zksync_dal::{ConnectionPool, StorageProcessor};
-use zksync_merkle_tree::ZkSyncTree as OldTree;
-use zksync_merkle_tree2::domain::ZkSyncTree as NewTree;
+use zksync_merkle_tree::domain::ZkSyncTree;
 use zksync_object_store::ObjectStore;
-use zksync_storage::RocksDB;
+use zksync_storage::{db::NamedColumnFamily, RocksDB};
 use zksync_types::{block::WitnessBlockWithLogs, L1BatchNumber};
 
 use super::{
     get_logs_for_l1_batch,
-    helpers::{Delayer, ZkSyncTree},
+    helpers::{AsyncTree, Delayer},
     metrics::TreeUpdateStage,
-    MetadataCalculator, MetadataCalculatorMode, MetadataCalculatorStatus, TreeImplementation,
+    MetadataCalculator, MetadataCalculatorMode, MetadataCalculatorStatus,
 };
 
 #[derive(Debug)]
 pub(super) struct TreeUpdater {
     mode: MetadataCalculatorMode,
-    tree: ZkSyncTree,
+    tree: AsyncTree,
     max_block_batch: usize,
     object_store: Option<Box<dyn ObjectStore>>,
 }
@@ -30,30 +28,21 @@ pub(super) struct TreeUpdater {
 impl TreeUpdater {
     pub fn new(
         mode: MetadataCalculatorMode,
-        db: RocksDB,
-        config: &DBConfig,
+        db_path: &str,
+        max_block_batch: usize,
         object_store: Option<Box<dyn ObjectStore>>,
     ) -> Self {
-        use self::TreeImplementation::{New, Old};
+        assert!(
+            max_block_batch > 0,
+            "Maximum block batch is misconfigured to be 0; please update it to a positive value"
+        );
 
-        let tree = match mode {
-            MetadataCalculatorMode::Full(Old) => ZkSyncTree::Old(OldTree::new(db)),
-            MetadataCalculatorMode::Full(New) => ZkSyncTree::New(NewTree::new(db)),
-            MetadataCalculatorMode::Lightweight(Old) => {
-                ZkSyncTree::Old(OldTree::new_lightweight(db))
-            }
-            MetadataCalculatorMode::Lightweight(New) => {
-                ZkSyncTree::New(NewTree::new_lightweight(db))
-            }
-        };
+        let db = Self::create_db(db_path);
+        let tree = AsyncTree::new(match mode {
+            MetadataCalculatorMode::Full => ZkSyncTree::new(db),
+            MetadataCalculatorMode::Lightweight => ZkSyncTree::new_lightweight(db),
+        });
 
-        let max_block_batch = if matches!(mode, MetadataCalculatorMode::Lightweight(Old)) {
-            // The old tree implementation does not support processing multiple blocks
-            // in the lightweight mode.
-            1
-        } else {
-            config.max_block_batch
-        };
         Self {
             mode,
             tree,
@@ -62,8 +51,19 @@ impl TreeUpdater {
         }
     }
 
+    fn create_db(path: &str) -> RocksDB {
+        let db = RocksDB::new(path, true);
+        if cfg!(test) {
+            // We need sync writes for the unit tests to execute reliably. With the default config,
+            // some writes to RocksDB may occur, but not be visible to the test code.
+            db.with_sync_writes()
+        } else {
+            db
+        }
+    }
+
     #[cfg(test)]
-    pub fn tree(&self) -> &ZkSyncTree {
+    pub fn tree(&self) -> &AsyncTree {
         &self.tree
     }
 
@@ -71,134 +71,170 @@ impl TreeUpdater {
         self.mode
     }
 
-    #[tracing::instrument(skip(self, storage, blocks))]
-    fn process_multiple_blocks(
+    async fn process_multiple_blocks(
         &mut self,
         storage: &mut StorageProcessor<'_>,
+        prover_storage: &mut StorageProcessor<'_>,
         blocks: Vec<WitnessBlockWithLogs>,
     ) {
         let start = Instant::now();
 
         let compute_latency = TreeUpdateStage::Compute.start();
         let total_logs: usize = blocks.iter().map(|block| block.storage_logs.len()).sum();
-        let storage_logs = blocks.iter().map(|block| block.storage_logs.as_slice());
+        if let (Some(first), Some(last)) = (blocks.first(), blocks.last()) {
+            let l1_batch_numbers = first.header.number.0..=last.header.number.0;
+            vlog::info!("Processing L1 batches #{l1_batch_numbers:?} with {total_logs} total logs");
+        };
+        let (storage_logs, block_headers): (Vec<_>, Vec<_>) = blocks
+            .into_iter()
+            .map(|block| (block.storage_logs, block.header))
+            .unzip();
 
         let mut previous_root_hash = self.tree.root_hash();
-        let metadata = self.tree.process_blocks(storage_logs);
+        let metadata = self.tree.process_blocks(storage_logs).await;
         compute_latency.report();
 
-        let mut block_headers = Vec::with_capacity(blocks.len());
-        for (mut metadata_at_block, block) in metadata.into_iter().zip(blocks) {
+        let mut updated_headers = Vec::with_capacity(block_headers.len());
+        for (mut metadata_at_block, block_header) in metadata.into_iter().zip(block_headers) {
             let prepare_results_latency = TreeUpdateStage::PrepareResults.start();
-            let witness_input = metadata_at_block.witness_input.take();
+            let witness_input = metadata_at_block.witness.take();
 
             let next_root_hash = metadata_at_block.root_hash;
             let metadata =
                 MetadataCalculator::build_block_metadata(metadata_at_block, &block_header);
             prepare_results_latency.report();
 
             let block_with_metadata =
-                MetadataCalculator::reestimate_block_commit_gas(storage, block.header, metadata);
+                MetadataCalculator::reestimate_block_commit_gas(storage, block_header, metadata)
+                    .await;
             let block_number = block_with_metadata.header.number;
 
-            let object_key = self.object_store.as_ref().map(|object_store| {
+            let object_key = if let Some(object_store) = &self.object_store {
                 let witness_input =
                     witness_input.expect("No witness input provided by tree; this is a bug");
+                let save_witnesses_latency = TreeUpdateStage::SaveWitnesses.start();
+                let object_key = object_store
+                    .put(block_number, &witness_input)
+                    .await
+                    .unwrap();
+                save_witnesses_latency.report();
 
-                TreeUpdateStage::SaveWitnesses
-                    .run(|| object_store.put(block_number, &witness_input).unwrap())
-            });
+                vlog::info!(
+                    "Saved witnesses for L1 batch #{block_number} to object storage at `{object_key}`"
+                );
+                Some(object_key)
+            } else {
+                None
+            };
 
             // Save the metadata in case the lightweight tree is behind / not running
-            let metadata = block_with_metadata.metadata;
-            TreeUpdateStage::SavePostgres.run(|| {
-                storage.blocks_dal().save_blocks_metadata(
-                    block_number,
-                    metadata,
-                    previous_root_hash,
-                );
-                // ^ Note that `save_blocks_metadata()` will not blindly overwrite changes if the block
-                // metadata already exists; instead, it'll check that the old an new metadata match.
-                // That is, if we run both tree implementations, we'll get metadata correspondence
-                // right away without having to implement dedicated code.
-
-                if let Some(object_key) = &object_key {
-                    storage
-                        .witness_generator_dal()
-                        .save_witness_inputs(block_number, object_key);
-                }
-            });
+            let metadata = &block_with_metadata.metadata;
+            let save_postgres_latency = TreeUpdateStage::SavePostgres.start();
+            storage
+                .blocks_dal()
+                .save_blocks_metadata(block_number, metadata, previous_root_hash)
+                .await;
+            // ^ Note that `save_blocks_metadata()` will not blindly overwrite changes if the block
+            // metadata already exists; instead, it'll check that the old and new metadata match.
+            // That is, if we run both tree implementations, we'll get metadata correspondence
+            // right away without having to implement dedicated code.
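Worth spelling out the pattern this hunk repeats: the closure-based `TreeUpdateStage::X.run(|| ...)` helper disappears because a sync closure cannot contain `.await`, so each stage is now opened with `start()` and closed with `report()` around the awaited section. A minimal sketch of that pattern with a stand-in stage type (`Stage`/`StartedStage` below are hypothetical, not the real `TreeUpdateStage`):

use std::time::Instant;

struct Stage(&'static str);

impl Stage {
    fn start(&self) -> StartedStage {
        StartedStage { name: self.0, started_at: Instant::now() }
    }
}

struct StartedStage {
    name: &'static str,
    started_at: Instant,
}

impl StartedStage {
    fn report(self) {
        // The real code records a metrics histogram here.
        println!("{} took {:?}", self.name, self.started_at.elapsed());
    }
}

async fn save_metadata() { /* e.g. a Postgres write that must be awaited */ }

#[tokio::main]
async fn main() {
    let save_postgres_latency = Stage("save_postgres").start();
    save_metadata().await; // awaiting is fine between start() and report()
    save_postgres_latency.report();
}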
+
+            if let Some(object_key) = &object_key {
+                prover_storage
+                    .witness_generator_dal()
+                    .save_witness_inputs(block_number, object_key)
+                    .await;
+                prover_storage
+                    .fri_witness_generator_dal()
+                    .save_witness_inputs(block_number, object_key)
+                    .await;
+            }
+            save_postgres_latency.report();
+            vlog::info!("Updated metadata for L1 batch #{block_number} in Postgres");
 
             previous_root_hash = next_root_hash;
-            block_headers.push(block_with_metadata.header);
+            updated_headers.push(block_with_metadata.header);
         }
-        TreeUpdateStage::SaveRocksDB.run(|| self.tree.save());
-        MetadataCalculator::update_metrics(self.mode, &block_headers, total_logs, start);
+        let save_rocksdb_latency = TreeUpdateStage::SaveRocksDB.start();
+        self.tree.save().await;
+        save_rocksdb_latency.report();
+        MetadataCalculator::update_metrics(self.mode, &updated_headers, total_logs, start);
     }
 
-    fn tree_implementation(&self) -> TreeImplementation {
-        match &self.tree {
-            ZkSyncTree::Old(_) => TreeImplementation::Old,
-            ZkSyncTree::New(_) => TreeImplementation::New,
+    async fn step(
+        &mut self,
+        mut storage: StorageProcessor<'_>,
+        mut prover_storage: StorageProcessor<'_>,
+        next_block_to_seal: &mut L1BatchNumber,
+    ) {
+        let load_changes_latency = TreeUpdateStage::LoadChanges.start();
+        let last_sealed_block = storage.blocks_dal().get_sealed_block_number().await;
+        let last_requested_block = next_block_to_seal.0 + self.max_block_batch as u32 - 1;
+        let last_requested_block = last_requested_block.min(last_sealed_block.0);
+        let block_numbers = next_block_to_seal.0..=last_requested_block;
+        if block_numbers.is_empty() {
+            vlog::trace!(
+                "No blocks to seal: block numbers range to be loaded {block_numbers:?} is empty"
+            );
+        } else {
+            vlog::info!("Loading blocks with numbers {block_numbers:?} to update Merkle tree");
         }
-    }
 
-    fn step(&mut self, mut storage: StorageProcessor<'_>, next_block_to_seal: &mut L1BatchNumber) {
-        let new_blocks: Vec<_> = TreeUpdateStage::LoadChanges.run(|| {
-            let last_sealed_block = storage.blocks_dal().get_sealed_block_number();
-            (next_block_to_seal.0..=last_sealed_block.0)
-                .map(L1BatchNumber)
-                .take(self.max_block_batch)
-                .flat_map(|block_number| get_logs_for_l1_batch(&mut storage, block_number))
-                .collect()
-        });
+        let mut new_blocks = vec![];
+        for block_number in block_numbers {
+            let logs = get_logs_for_l1_batch(&mut storage, L1BatchNumber(block_number)).await;
+            new_blocks.extend(logs);
+        }
+        load_changes_latency.report();
 
         if let Some(last_block) = new_blocks.last() {
             *next_block_to_seal = last_block.header.number + 1;
-            self.process_multiple_blocks(&mut storage, new_blocks);
+            self.process_multiple_blocks(&mut storage, &mut prover_storage, new_blocks)
+                .await;
         }
     }
 
     /// The processing loop for this updater.
-    pub fn loop_updating_tree(
+    pub async fn loop_updating_tree(
         mut self,
         delayer: Delayer,
         throttler: Delayer,
         pool: &ConnectionPool,
+        prover_pool: &ConnectionPool,
         mut stop_receiver: watch::Receiver<bool>,
         status_sender: watch::Sender<MetadataCalculatorStatus>,
     ) {
-        let mut storage = pool.access_storage_blocking();
+        let mut storage = pool.access_storage_tagged("metadata_calculator").await;
 
         // Ensure genesis creation
         let tree = &mut self.tree;
         if tree.is_empty() {
-            let Some(logs) = get_logs_for_l1_batch(&mut storage, L1BatchNumber(0)) else {
+            let Some(logs) = get_logs_for_l1_batch(&mut storage, L1BatchNumber(0)).await else {
                 panic!("Missing storage logs for the genesis block");
             };
-            tree.process_block(&logs.storage_logs);
-            tree.save();
+            tree.process_block(logs.storage_logs).await;
+            tree.save().await;
         }
         let mut next_block_to_seal = L1BatchNumber(tree.block_number());
 
-        let current_db_block = storage.blocks_dal().get_sealed_block_number() + 1;
-        let last_block_number_with_metadata =
-            storage.blocks_dal().get_last_block_number_with_metadata() + 1;
+        let current_db_block = storage.blocks_dal().get_sealed_block_number().await + 1;
+        let last_block_number_with_metadata = storage
+            .blocks_dal()
+            .get_last_block_number_with_metadata()
+            .await
+            + 1;
         drop(storage);
 
-        let tree_tag = self.tree_implementation().as_tag();
         vlog::info!(
-            "Initialized metadata calculator with {} tree implementation. \
-             Current RocksDB block: {}. Current Postgres block: {}",
-            tree_tag,
-            next_block_to_seal,
-            current_db_block
+            "Initialized metadata calculator with {max_block_batch} max batch size. \
+             Current RocksDB block: {next_block_to_seal}, current Postgres block: {current_db_block}, \
+             last block with metadata: {last_block_number_with_metadata}",
+            max_block_batch = self.max_block_batch
        );
         metrics::gauge!(
             "server.metadata_calculator.backup_lag",
-            (last_block_number_with_metadata - *next_block_to_seal).0 as f64,
-            "tree" => tree_tag
+            (last_block_number_with_metadata - *next_block_to_seal).0 as f64
         );
         status_sender.send_replace(MetadataCalculatorStatus::Ready);
@@ -207,16 +243,36 @@ impl TreeUpdater {
                 vlog::info!("Stop signal received, metadata_calculator is shutting down");
                 break;
             }
-            let storage = pool.access_storage_blocking();
+            let storage = pool.access_storage_tagged("metadata_calculator").await;
+            let prover_storage = prover_pool
+                .access_storage_tagged("metadata_calculator")
+                .await;
 
             let next_block_snapshot = *next_block_to_seal;
-            self.step(storage, &mut next_block_to_seal);
-            if next_block_snapshot == *next_block_to_seal {
-                // We didn't make any progress.
-                delayer.wait(&self.tree);
+            self.step(storage, prover_storage, &mut next_block_to_seal)
+                .await;
+            let delay = if next_block_snapshot == *next_block_to_seal {
+                vlog::trace!(
+                    "Metadata calculator (next L1 batch: #{next_block_to_seal}) \
+                     didn't make any progress; delaying it using {delayer:?}"
+                );
+                delayer.wait(&self.tree)
             } else {
-                // We've made some progress; apply throttling if necessary.
-                throttler.wait(&self.tree);
+                vlog::trace!(
+                    "Metadata calculator (next L1 batch: #{next_block_to_seal}) \
+                     made progress from #{next_block_snapshot}; throttling it using {throttler:?}"
+                );
+                throttler.wait(&self.tree)
+            };
+
+            // The delays we're operating with are reasonably small, but selecting between the delay
+            // and the stop receiver still allows us to be more responsive during shutdown.
+            tokio::select! {
+                _ = stop_receiver.changed() => {
+                    vlog::info!("Stop signal received, metadata_calculator is shutting down");
+                    break;
+                }
+                () = delay => { /* The delay has passed */ }
             }
         }
     }
 }
diff --git a/core/bin/zksync_core/src/reorg_detector/mod.rs b/core/bin/zksync_core/src/reorg_detector/mod.rs
index ab1ab015930e..d49ed276cf81 100644
--- a/core/bin/zksync_core/src/reorg_detector/mod.rs
+++ b/core/bin/zksync_core/src/reorg_detector/mod.rs
@@ -1,8 +1,9 @@
 use std::time::Duration;
 
 use zksync_web3_decl::{
-    jsonrpsee::core::{error::Error as RpcError, RpcResult},
+    jsonrpsee::core::Error as RpcError,
     jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
     namespaces::ZksNamespaceClient,
+    RpcResult,
 };
 
 use zksync_dal::ConnectionPool;
@@ -43,9 +44,11 @@ impl ReorgDetector {
         // Unwrapping is fine since the caller always checks that these root hashes exist.
         let local_hash = self
             .pool
-            .access_storage_blocking()
+            .access_storage()
+            .await
             .blocks_dal()
             .get_block_state_root(block_number)
+            .await
             .unwrap_or_else(|| {
                 panic!("Root hash does not exist for local batch #{}", block_number)
             });
@@ -122,9 +125,11 @@ impl ReorgDetector {
         loop {
             let sealed_block_number = self
                 .pool
-                .access_storage_blocking()
+                .access_storage()
+                .await
                 .blocks_dal()
-                .get_last_block_number_with_metadata();
+                .get_last_block_number_with_metadata()
+                .await;
 
             // If the main node has to catch up with us, we should not do anything just yet.
             if self
diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs
index 83caa96ca9e3..0744229e9157 100644
--- a/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs
+++ b/core/bin/zksync_core/src/state_keeper/batch_executor/mod.rs
@@ -1,7 +1,12 @@
-use std::{sync::mpsc, thread, time::Instant};
+use async_trait::async_trait;
+use tokio::{
+    sync::{mpsc, oneshot},
+    task::JoinHandle,
+};
+
+use std::{collections::HashSet, fmt, time::Instant};
 
 use vm::{
-    storage::Storage,
     vm::{VmPartialExecutionResult, VmTxExecutionResult},
     vm_with_bootloader::{
         init_vm, init_vm_with_gas_limit, push_transaction_to_bootloader_memory, BootloaderJobType,
@@ -10,21 +15,18 @@ use vm::{
     HistoryEnabled, HistoryMode, TxRevertReason, VmBlockResult, VmInstance,
 };
 use zksync_dal::ConnectionPool;
-use zksync_state::{secondary_storage::SecondaryStateStorage, storage_view::StorageView};
-use zksync_storage::{db::Database, RocksDB};
+use zksync_state::{RocksdbStorage, StorageView};
 use zksync_types::{tx::ExecutionMetrics, Transaction, U256};
 use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo};
 
-use itertools::Itertools;
-
-use crate::gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics};
-use crate::state_keeper::io::L1BatchParams;
-
-use crate::state_keeper::types::ExecutionMetricsForCriteria;
-
 #[cfg(test)]
 mod tests;
 
+use crate::{
+    gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics},
+    state_keeper::{io::L1BatchParams, types::ExecutionMetricsForCriteria},
+};
+
 /// Representation of a transaction executed in the virtual machine.
 #[derive(Debug, Clone)]
 pub(crate) enum TxExecutionResult {
@@ -46,26 +48,26 @@ pub(crate) enum TxExecutionResult {
 
 impl TxExecutionResult {
     /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas.
-    pub(super) fn err(&self) -> Option<TxRevertReason> {
+    pub(super) fn err(&self) -> Option<&TxRevertReason> {
         match self {
-            TxExecutionResult::Success { .. } => None,
-            TxExecutionResult::RejectedByVm { rejection_reason } => Some(rejection_reason.clone()),
-            TxExecutionResult::BootloaderOutOfGasForTx
-            | TxExecutionResult::BootloaderOutOfGasForBlockTip { .. } => {
-                Some(TxRevertReason::BootloaderOutOfGas)
+            Self::Success { .. } => None,
+            Self::RejectedByVm { rejection_reason } => Some(rejection_reason),
+            Self::BootloaderOutOfGasForTx | Self::BootloaderOutOfGasForBlockTip { .. } => {
+                Some(&TxRevertReason::BootloaderOutOfGas)
             }
         }
     }
 }
 
 /// An abstraction that allows us to create different kinds of batch executors.
-/// The only requirement is to return the `BatchExecutorHandle` object, which does its work
+/// The only requirement is to return a [`BatchExecutorHandle`], which does its work
 /// by communicating with the externally initialized thread.
-pub trait L1BatchExecutorBuilder: 'static + std::fmt::Debug + Send {
-    fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle;
+#[async_trait]
+pub trait L1BatchExecutorBuilder: 'static + Send + Sync + fmt::Debug {
+    async fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle;
 }
 
-/// The default implementation of the `BatchExecutorBuilder`.
+/// The default implementation of [`L1BatchExecutorBuilder`].
 /// Creates a "real" batch executor which maintains the VM (as opposed to the test builder which doesn't use the VM).
 #[derive(Debug, Clone)]
 pub struct MainBatchExecutorBuilder {
@@ -94,17 +96,14 @@ impl MainBatchExecutorBuilder {
     }
 }
 
+#[async_trait]
 impl L1BatchExecutorBuilder for MainBatchExecutorBuilder {
-    fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle {
-        let secondary_storage = self
-            .pool
-            .access_storage_blocking()
-            .storage_load_dal()
-            .load_secondary_storage(RocksDB::new(
-                Database::StateKeeper,
-                &self.state_keeper_db_path,
-                true,
-            ));
+    async fn init_batch(&self, l1_batch_params: L1BatchParams) -> BatchExecutorHandle {
+        let mut secondary_storage = RocksdbStorage::new(self.state_keeper_db_path.as_ref());
+        let mut conn = self.pool.access_storage_tagged("state_keeper").await;
+        secondary_storage.update_from_postgres(&mut conn).await;
+        drop(conn);
+
         vlog::info!(
             "Secondary storage for batch {} initialized, size is {}",
             l1_batch_params
@@ -112,11 +111,11 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder {
                 .inner_block_context()
                 .context
                 .block_number,
-            secondary_storage.get_estimated_map_size()
+            secondary_storage.estimated_map_size()
         );
         metrics::gauge!(
             "server.state_keeper.storage_map_size",
-            secondary_storage.get_estimated_map_size() as f64,
+            secondary_storage.estimated_map_size() as f64,
         );
         BatchExecutorHandle::new(
             self.save_call_traces,
@@ -134,7 +133,7 @@ impl L1BatchExecutorBuilder for MainBatchExecutorBuilder {
 /// the batches.
 #[derive(Debug)]
 pub struct BatchExecutorHandle {
-    handle: thread::JoinHandle<()>,
+    handle: JoinHandle<()>,
     commands: mpsc::Sender<Command>,
 }
 
@@ -143,11 +142,13 @@ impl BatchExecutorHandle {
         save_call_traces: bool,
         max_allowed_tx_gas_limit: U256,
         validation_computational_gas_limit: u32,
-        secondary_storage: SecondaryStateStorage,
+        secondary_storage: RocksdbStorage,
         l1_batch_params: L1BatchParams,
        vm_gas_limit: Option<u32>,
     ) -> Self {
-        let (commands_sender, commands_receiver) = mpsc::channel();
+        // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued
+        // until a previous command is processed), capacity 1 is enough for the commands channel.
+        let (commands_sender, commands_receiver) = mpsc::channel(1);
         let executor = BatchExecutor {
             save_call_traces,
             max_allowed_tx_gas_limit,
@@ -156,8 +157,8 @@ impl BatchExecutorHandle {
             vm_gas_limit,
         };
 
-        let handle = thread::spawn(move || executor.run(secondary_storage, l1_batch_params));
-
+        let handle =
+            tokio::task::spawn_blocking(move || executor.run(secondary_storage, l1_batch_params));
         Self {
             handle,
             commands: commands_sender,
@@ -166,50 +167,79 @@ impl BatchExecutorHandle {
 
     /// Creates a batch executor handle from the provided sender and thread join handle.
     /// Can be used to inject an alternative batch executor implementation.
-    pub(crate) fn from_raw(
-        handle: thread::JoinHandle<()>,
-        commands: mpsc::Sender<Command>,
-    ) -> Self {
+    #[cfg(test)]
+    pub(super) fn from_raw(handle: JoinHandle<()>, commands: mpsc::Sender<Command>) -> Self {
         Self { handle, commands }
     }
 
-    pub(super) fn execute_tx(&self, tx: Transaction) -> TxExecutionResult {
-        let (response_sender, response_receiver) = mpsc::sync_channel(0);
+    pub(super) async fn execute_tx(&self, tx: Transaction) -> TxExecutionResult {
+        let tx_gas_limit = tx.gas_limit().as_u32();
+
+        let (response_sender, response_receiver) = oneshot::channel();
         self.commands
-            .send(Command::ExecuteTx(tx, response_sender))
+            .send(Command::ExecuteTx(Box::new(tx), response_sender))
+            .await
             .unwrap();
 
         let start = Instant::now();
-        let res = response_receiver.recv().unwrap();
-        metrics::histogram!("state_keeper.batch_executor.command_response_time", start.elapsed(), "command" => "execute_tx");
+        let res = response_receiver.await.unwrap();
+        let elapsed = start.elapsed();
+
+        metrics::histogram!("state_keeper.batch_executor.command_response_time", elapsed, "command" => "execute_tx");
+
+        if let TxExecutionResult::Success { tx_metrics, .. } = res {
+            metrics::histogram!(
+                "state_keeper.computational_gas_per_nanosecond",
+                tx_metrics.execution_metrics.computational_gas_used as f64
+                    / elapsed.as_nanos() as f64
+            );
+        } else {
+            // The amount of computational gas paid for failed transactions is hard to get,
+            // but comparing to the gas limit makes sense, since we can burn all gas
+            // if some kind of failure is a DDoS vector otherwise.
+            metrics::histogram!(
+                "state_keeper.failed_tx_gas_limit_per_nanosecond",
+                tx_gas_limit as f64 / elapsed.as_nanos() as f64
+            );
+        }
+        res
     }
 
-    pub(super) fn rollback_last_tx(&self) {
+    pub(super) async fn rollback_last_tx(&self) {
         // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation
         // indeed has been processed.
-        let (response_sender, response_receiver) = mpsc::sync_channel(0);
+        let (response_sender, response_receiver) = oneshot::channel();
         self.commands
             .send(Command::RollbackLastTx(response_sender))
+            .await
             .unwrap();
         let start = Instant::now();
-        response_receiver.recv().unwrap();
+        response_receiver.await.unwrap();
         metrics::histogram!("state_keeper.batch_executor.command_response_time", start.elapsed(), "command" => "rollback_last_tx");
     }
 
-    pub(super) fn finish_batch(self) -> VmBlockResult {
-        let (response_sender, response_receiver) = mpsc::sync_channel(0);
+    pub(super) async fn finish_batch(self) -> VmBlockResult {
+        let (response_sender, response_receiver) = oneshot::channel();
         self.commands
             .send(Command::FinishBatch(response_sender))
+            .await
             .unwrap();
         let start = Instant::now();
-        let resp = response_receiver.recv().unwrap();
-        self.handle.join().unwrap();
+        let resp = response_receiver.await.unwrap();
+        self.handle.await.unwrap();
         metrics::histogram!("state_keeper.batch_executor.command_response_time", start.elapsed(), "command" => "finish_batch");
         resp
     }
 }
 
+#[derive(Debug)]
+pub(super) enum Command {
+    ExecuteTx(Box<Transaction>, oneshot::Sender<TxExecutionResult>),
+    RollbackLastTx(oneshot::Sender<()>),
+    FinishBatch(oneshot::Sender<VmBlockResult>),
+}
+
 /// Implementation of the "primary" (non-test) batch executor.
 /// Upon launch, it initializes the VM object with provided block context and properties, and keeps applying
 /// transactions until the batch is sealed.
@@ -225,19 +255,8 @@ pub(super) struct BatchExecutor {
     vm_gas_limit: Option<u32>,
 }
 
-#[allow(clippy::large_enum_variant)]
-pub(crate) enum Command {
-    ExecuteTx(Transaction, mpsc::SyncSender<TxExecutionResult>),
-    RollbackLastTx(mpsc::SyncSender<()>),
-    FinishBatch(mpsc::SyncSender<VmBlockResult>),
-}
-
 impl BatchExecutor {
-    pub(super) fn run(
-        self,
-        secondary_storage: SecondaryStateStorage,
-        l1_batch_params: L1BatchParams,
-    ) {
+    pub(super) fn run(mut self, secondary_storage: RocksdbStorage, l1_batch_params: L1BatchParams) {
         vlog::info!(
             "Starting executing batch #{}",
             l1_batch_params
@@ -248,9 +267,7 @@ impl BatchExecutor {
         );
 
         let mut storage_view = StorageView::new(&secondary_storage);
-        let mut oracle_tools =
-            vm::OracleTools::new(&mut storage_view as &mut dyn Storage, HistoryEnabled);
-
+        let mut oracle_tools = vm::OracleTools::new(&mut storage_view, HistoryEnabled);
         let mut vm = match self.vm_gas_limit {
             Some(vm_gas_limit) => init_vm_with_gas_limit(
                 &mut oracle_tools,
@@ -269,7 +286,7 @@ impl BatchExecutor {
             ),
         };
 
-        while let Ok(cmd) = self.commands.recv() {
+        while let Some(cmd) = self.commands.blocking_recv() {
             match cmd {
                 Command::ExecuteTx(tx, resp) => {
                     let result = self.execute_tx(&tx, &mut vm);
@@ -281,6 +298,21 @@ impl BatchExecutor {
                 }
                 Command::FinishBatch(resp) => {
                     resp.send(self.finish_batch(&mut vm)).unwrap();
+
+                    // storage_view cannot be accessed while borrowed by the VM,
+                    // so this is the only point at which storage metrics can be obtained
+                    let metrics = storage_view.metrics();
+                    metrics::histogram!(
+                        "state_keeper.batch_storage_interaction_duration",
+                        metrics.time_spent_on_get_value,
+                        "interaction" => "get_value"
+                    );
+                    metrics::histogram!(
+                        "state_keeper.batch_storage_interaction_duration",
+                        metrics.time_spent_on_set_value,
+                        "interaction" => "set_value"
+                    );
+
                     return;
                 }
             }
@@ -394,30 +426,26 @@ impl BatchExecutor {
         // Saving the snapshot before executing
         vm.save_current_vm_as_snapshot();
 
-        let compressed_bytecodes = if tx.is_l1() || tx.execute.factory_deps.is_none() {
+        let compressed_bytecodes = if tx.is_l1() {
            // For L1 transactions there are no compressed bytecodes
             vec![]
         } else {
             // Deduplicate and filter factory deps preserving original order.
-            tx.execute
-                .factory_deps
-                .as_ref()
-                .unwrap()
-                .iter()
-                .enumerate()
-                .sorted_by_key(|(_idx, dep)| *dep)
-                .dedup_by(|x, y| x.1 == y.1)
-                .filter(|(_idx, dep)| {
-                    !vm.state
-                        .storage
-                        .storage
-                        .get_ptr()
-                        .borrow_mut()
-                        .is_bytecode_known(&hash_bytecode(dep))
-                })
-                .sorted_by_key(|(idx, _dep)| *idx)
-                .filter_map(|(_idx, dep)| CompressedBytecodeInfo::from_original(dep.clone()).ok())
-                .collect()
+            let deps = tx.execute.factory_deps.as_deref().unwrap_or_default();
+            let storage_ptr = vm.state.storage.storage.get_ptr();
+            let mut storage_ptr = storage_ptr.borrow_mut();
+            let mut deps_hashes = HashSet::with_capacity(deps.len());
+            let filtered_deps = deps.iter().filter_map(|bytecode| {
+                let bytecode_hash = hash_bytecode(bytecode);
+                let is_known = !deps_hashes.insert(bytecode_hash)
+                    || storage_ptr.is_bytecode_known(&bytecode_hash);
+                if is_known {
+                    None
+                } else {
+                    CompressedBytecodeInfo::from_original(bytecode.clone()).ok()
+                }
+            });
+            filtered_deps.collect()
         };
 
         push_transaction_to_bootloader_memory(
@@ -431,14 +459,13 @@ impl BatchExecutor {
             self.save_call_traces,
         )?;
 
-        let at_least_one_unpublished = compressed_bytecodes.iter().any(|info| {
-            !vm.state
-                .storage
-                .storage
-                .get_ptr()
-                .borrow_mut()
-                .is_bytecode_known(&hash_bytecode(&info.original))
-        });
+        let at_least_one_unpublished = {
+            let storage_ptr = vm.state.storage.storage.get_ptr();
+            let mut storage_ptr = storage_ptr.borrow_mut();
+            compressed_bytecodes
+                .iter()
+                .any(|info| !storage_ptr.is_bytecode_known(&hash_bytecode(&info.original)))
+        };
 
         if at_least_one_unpublished {
             // Rolling back and trying to execute one more time.
@@ -458,7 +485,6 @@ impl BatchExecutor {
         } else {
             // Remove the snapshot taken at the start of this function as it is not needed anymore.
             vm.pop_snapshot_no_rollback();
-
             Ok((result_with_compression, compressed_bytecodes))
         }
     }
diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs
index 928213b00178..767926e69ad8 100644
--- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs
+++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/mod.rs
@@ -11,19 +11,18 @@ use super::TxExecutionResult;
 use crate::state_keeper::batch_executor::tests::tester::TestConfig;
 
 /// Ensures that the transaction was executed successfully.
-fn assert_executed(execution_result: TxExecutionResult) {
+fn assert_executed(execution_result: &TxExecutionResult) {
     assert_matches!(execution_result, TxExecutionResult::Success { .. });
 }
 
 /// Ensures that the transaction was rejected by the VM.
-fn assert_rejected(execution_result: TxExecutionResult) {
+fn assert_rejected(execution_result: &TxExecutionResult) {
     assert_matches!(execution_result, TxExecutionResult::RejectedByVm { .. });
 }
 
 /// Ensures that the transaction was executed successfully but reverted by the VM.
-fn assert_reverted(execution_result: TxExecutionResult) {
-    assert_executed(execution_result.clone());
-
+fn assert_reverted(execution_result: &TxExecutionResult) {
+    assert_executed(execution_result);
     if let TxExecutionResult::Success { tx_result, .. } = execution_result {
         assert_matches!(tx_result.status, TxExecutionStatus::Failure);
     } else {
@@ -37,13 +36,13 @@ async fn execute_l2_tx(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
-    let res = executor.execute_tx(alice.execute());
-    assert_executed(res);
-    executor.finish_batch();
+    let res = executor.execute_tx(alice.execute()).await;
+    assert_executed(&res);
+    executor.finish_batch().await;
 }
 
 /// Checks that we can successfully execute a single L1 tx in batch executor.
@@ -52,13 +51,13 @@ async fn execute_l1_tx(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
-    let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1)));
-    assert_executed(res);
-    executor.finish_batch();
+    let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))).await;
+    assert_executed(&res);
+    executor.finish_batch().await;
 }
 
 /// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor.
@@ -67,17 +66,17 @@ async fn execute_l2_and_l1_txs(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
-    let res = executor.execute_tx(alice.execute());
-    assert_executed(res);
+    let res = executor.execute_tx(alice.execute()).await;
+    assert_executed(&res);
 
-    let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1)));
-    assert_executed(res);
+    let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))).await;
+    assert_executed(&res);
 
-    executor.finish_batch();
+    executor.finish_batch().await;
 }
 
 /// Checks that we can successfully rollback the transaction and execute it once again.
@@ -86,19 +85,19 @@ async fn rollback(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
     let tx = alice.execute();
-    let res_old = executor.execute_tx(tx.clone());
-    assert_executed(res_old.clone());
+    let res_old = executor.execute_tx(tx.clone()).await;
+    assert_executed(&res_old);
 
-    executor.rollback_last_tx();
+    executor.rollback_last_tx().await;
 
     // Execute the same transaction, it must succeed.
-    let res_new = executor.execute_tx(tx);
-    assert_executed(res_new.clone());
+    let res_new = executor.execute_tx(tx).await;
+    assert_executed(&res_new);
 
     let (
         TxExecutionResult::Success {
@@ -117,8 +116,7 @@
         tx_metrics_old, tx_metrics_new,
         "Execution results must be the same"
     );
-
-    executor.finish_batch();
+    executor.finish_batch().await;
 }
 
 /// Checks that incorrect transactions are marked as rejected.
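For orientation between these test hunks: every `BatchExecutorHandle` method above now follows the same request-response shape, sending a `Command` that carries a `oneshot::Sender` for its reply over an mpsc channel of capacity 1 (commands are strictly serialized, so no buffering is needed). A self-contained sketch of that pattern with a toy command instead of the real `Command` enum:

use tokio::sync::{mpsc, oneshot};

enum Command {
    Echo(String, oneshot::Sender<String>),
}

#[tokio::main]
async fn main() {
    // Capacity 1 suffices because a new command is only sent after the
    // previous reply has been received, mirroring the handle above.
    let (commands, mut commands_rx) = mpsc::channel::<Command>(1);
    let actor = tokio::spawn(async move {
        while let Some(Command::Echo(msg, resp)) = commands_rx.recv().await {
            resp.send(format!("echo: {msg}")).unwrap();
        }
    });

    let (resp_sx, resp_rx) = oneshot::channel();
    commands.send(Command::Echo("hi".into(), resp_sx)).await.unwrap();
    assert_eq!(resp_rx.await.unwrap(), "echo: hi");

    drop(commands); // closing the channel lets the actor task finish
    actor.await.unwrap();
}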
@@ -127,13 +125,13 @@ async fn reject_tx(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    let executor = tester.create_batch_executor().await;
 
     // Wallet is not funded, it can't pay for fees.
-    let res = executor.execute_tx(alice.execute());
-    assert_rejected(res);
-    executor.finish_batch();
+    let res = executor.execute_tx(alice.execute()).await;
+    assert_rejected(&res);
+    executor.finish_batch().await;
 }
 
 /// Checks that tx with too big gas limit is correctly rejected.
@@ -142,18 +140,17 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
     let bad_tx = alice.execute_with_gas_limit(u32::MAX);
-    let res1 = executor.execute_tx(bad_tx.clone());
-    assert_rejected(res1.clone());
-
-    executor.rollback_last_tx();
+    let res_old = executor.execute_tx(bad_tx.clone()).await;
+    assert_rejected(&res_old);
 
-    let res2 = executor.execute_tx(bad_tx);
-    assert_rejected(res2.clone());
+    executor.rollback_last_tx().await;
+    let res_new = executor.execute_tx(bad_tx).await;
+    assert_rejected(&res_new);
 
     let (
         TxExecutionResult::RejectedByVm {
             rejection_reason: rejection_reason_old,
             ..
         },
         TxExecutionResult::RejectedByVm {
             rejection_reason: rejection_reason_new,
             ..
         },
-    ) = (res1, res2) else {
+    ) = (res_old, res_new) else {
         unreachable!();
     };
     assert_eq!(
@@ -174,9 +171,9 @@ async fn too_big_gas_limit(connection_pool: ConnectionPool) {
 
     // Ensure that now we can execute a valid tx.
     alice.nonce -= 1; // Reset the nonce.
-    let res3 = executor.execute_tx(alice.execute());
-    assert_executed(res3);
-    executor.finish_batch();
+    let res = executor.execute_tx(alice.execute()).await;
+    assert_executed(&res);
+    executor.finish_batch().await;
 }
 
 /// Checks that we can't execute the same transaction twice.
@@ -185,18 +182,18 @@ async fn tx_cant_be_reexecuted(connection_pool: ConnectionPool) {
     let mut alice = Account::random();
 
     let tester = Tester::new(connection_pool);
-    tester.genesis();
-    tester.fund(&[alice.address()]);
-    let executor = tester.create_batch_executor();
+    tester.genesis().await;
+    tester.fund(&[alice.address()]).await;
+    let executor = tester.create_batch_executor().await;
 
     let tx = alice.execute();
-    let res1 = executor.execute_tx(tx.clone());
-    assert_executed(res1);
+    let res1 = executor.execute_tx(tx.clone()).await;
+    assert_executed(&res1);
     // Nonce is used for the second tx.
-    let res2 = executor.execute_tx(tx);
-    assert_rejected(res2);
-    executor.finish_batch();
+    let res2 = executor.execute_tx(tx).await;
+    assert_rejected(&res2);
+    executor.finish_batch().await;
 }
 
 /// Checks that we can deploy and call the loadnext contract.
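One more building block worth isolating before the remaining test hunks: the executor rewrite keeps the VM loop synchronous. `BatchExecutor::run` executes under `tokio::task::spawn_blocking` and pulls commands with `blocking_recv`, while async callers `send(...).await` and finally await the join handle. A stripped-down sketch of that async-to-blocking bridge, with toy `u32` commands in place of the real ones:

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(1);
    let handle = tokio::task::spawn_blocking(move || {
        // Runs on the blocking thread pool, like `BatchExecutor::run`.
        let mut total = 0;
        while let Some(cmd) = rx.blocking_recv() {
            total += cmd;
        }
        total
    });

    for cmd in [1, 2, 3] {
        tx.send(cmd).await.unwrap();
    }
    drop(tx); // close the channel so the blocking loop exits

    assert_eq!(handle.await.unwrap(), 6);
}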
@@ -205,23 +202,17 @@ async fn deploy_and_call_loadtest(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis(); - tester.fund(&[alice.address()]); - let executor = tester.create_batch_executor(); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let executor = tester.create_batch_executor().await; let (deploy_tx, loadtest_address) = alice.deploy_loadnext_tx(); - assert_executed(executor.execute_tx(deploy_tx)); - assert_executed(executor.execute_tx(alice.loadnext_custom_gas_call( - loadtest_address, - 10, - 10000000, - ))); - assert_executed(executor.execute_tx(alice.loadnext_custom_writes_call( - loadtest_address, - 1, - 500_000_000, - ))); - executor.finish_batch(); + assert_executed(&executor.execute_tx(deploy_tx).await); + let custom_gas_tx = alice.loadnext_custom_gas_call(loadtest_address, 10, 10_000_000); + assert_executed(&executor.execute_tx(custom_gas_tx).await); + let custom_writes_tx = alice.loadnext_custom_writes_call(loadtest_address, 1, 500_000_000); + assert_executed(&executor.execute_tx(custom_writes_tx).await); + executor.finish_batch().await; } /// Checks that a tx that is reverted by the VM can still be included in a batch. @@ -230,19 +221,20 @@ async fn execute_reverted_tx(connection_pool: ConnectionPool) { let mut alice = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis(); - tester.fund(&[alice.address()]); - let executor = tester.create_batch_executor(); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let executor = tester.create_batch_executor().await; let (deploy_tx, loadtest_address) = alice.deploy_loadnext_tx(); - assert_executed(executor.execute_tx(deploy_tx)); + assert_executed(&executor.execute_tx(deploy_tx).await); - assert_reverted(executor.execute_tx(alice.loadnext_custom_writes_call( + let custom_writes_tx = alice.loadnext_custom_writes_call( loadtest_address, 1, 1_000_000, // We provide enough gas for tx to be executed, but not enough for the call to be successful. - ))); - executor.finish_batch(); + ); + assert_reverted(&executor.execute_tx(custom_writes_tx).await); + executor.finish_batch().await; } /// Runs the batch executor through a semi-realistic basic scenario: @@ -253,45 +245,45 @@ async fn execute_realistic_scenario(connection_pool: ConnectionPool) { let mut bob = Account::random(); let tester = Tester::new(connection_pool); - tester.genesis(); - tester.fund(&[alice.address()]); - tester.fund(&[bob.address()]); - let executor = tester.create_batch_executor(); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + tester.fund(&[bob.address()]).await; + let executor = tester.create_batch_executor().await; // A good tx should be executed successfully. - let res = executor.execute_tx(alice.execute()); - assert_executed(res); + let res = executor.execute_tx(alice.execute()).await; + assert_executed(&res); // Execute a good tx successfully, roll it back, and execute it again.
let tx_to_be_rolled_back = alice.execute(); - let res = executor.execute_tx(tx_to_be_rolled_back.clone()); - assert_executed(res); + let res = executor.execute_tx(tx_to_be_rolled_back.clone()).await; + assert_executed(&res); - executor.rollback_last_tx(); + executor.rollback_last_tx().await; - let res = executor.execute_tx(tx_to_be_rolled_back.clone()); - assert_executed(res); + let res = executor.execute_tx(tx_to_be_rolled_back.clone()).await; + assert_executed(&res); // A good tx from a different account should be executed successfully. - let res = executor.execute_tx(bob.execute()); - assert_executed(res); + let res = executor.execute_tx(bob.execute()).await; + assert_executed(&res); // If we try to execute an already executed tx again, it should be rejected. - let res = executor.execute_tx(tx_to_be_rolled_back); - assert_rejected(res); + let res = executor.execute_tx(tx_to_be_rolled_back).await; + assert_rejected(&res); // An unrelated good tx should be executed successfully. - executor.rollback_last_tx(); // Roll back the VM to the pre-rejected-tx state. + executor.rollback_last_tx().await; // Roll back the VM to the pre-rejected-tx state. // No need to reset the nonce because a tx with the current nonce was indeed executed. - let res = executor.execute_tx(alice.execute()); - assert_executed(res); + let res = executor.execute_tx(alice.execute()).await; + assert_executed(&res); // A good L1 tx should also be executed successfully. - let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))); - assert_executed(res); + let res = executor.execute_tx(alice.l1_execute(PriorityOpId(1))).await; + assert_executed(&res); - executor.finish_batch(); + executor.finish_batch().await; } /// Checks that we handle the bootloader out of gas error during the execution phase. @@ -309,15 +301,14 @@ async fn bootloader_out_of_gas_for_any_tx(connection_pool: ConnectionPool) { }, ); - tester.genesis(); - tester.fund(&[alice.address()]); - let executor = tester.create_batch_executor(); - - let res = executor.execute_tx(alice.execute()); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let executor = tester.create_batch_executor().await; + let res = executor.execute_tx(alice.execute()).await; assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); - executor.finish_batch(); + executor.finish_batch().await; } /// Checks that we can handle the bootloader out of gas error during the tip phase. @@ -328,14 +319,14 @@ async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { let mut tester = Tester::new(connection_pool); - tester.genesis(); - tester.fund(&[alice.address()]); - let executor = tester.create_batch_executor(); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let executor = tester.create_batch_executor().await; - let res = executor.execute_tx(alice.execute()); - assert_executed(res); + let res = executor.execute_tx(alice.execute()).await; + assert_executed(&res); - let vm_block_res = executor.finish_batch(); + let vm_block_res = executor.finish_batch().await; // A gas limit just below what the previous batch execution used should be enough to execute the tx, // but not enough to execute the block tip.
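// Taken together, these tests exercise one lifecycle of the async batch executor. A compact
// sketch of the flow they all share, using only the `Tester` helpers that appear in this file:
async fn batch_executor_roundtrip(connection_pool: ConnectionPool) {
    let mut alice = Account::random();
    let tester = Tester::new(connection_pool);
    tester.genesis().await; // Set up the genesis block in storage.
    tester.fund(&[alice.address()]).await; // Fund the account so it can pay fees.
    let executor = tester.create_batch_executor().await;
    let res = executor.execute_tx(alice.execute()).await;
    assert_executed(&res);
    executor.finish_batch().await; // Seal the batch, shutting the VM down.
}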
@@ -346,10 +337,10 @@ async fn bootloader_tip_out_of_gas(connection_pool: ConnectionPool) { validation_computational_gas_limit: u32::MAX, }); - let second_executor = tester.create_batch_executor(); + let second_executor = tester.create_batch_executor().await; - let res = second_executor.execute_tx(alice.execute()); + let res = second_executor.execute_tx(alice.execute()).await; assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForBlockTip); - second_executor.finish_batch(); + second_executor.finish_batch().await; } diff --git a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index a2f736b0ca16..c0dba17c22da 100644 --- a/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,13 +1,8 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. -use crate::genesis::create_genesis_block; -use crate::state_keeper::{ - batch_executor::BatchExecutorHandle, - io::L1BatchParams, - tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, -}; use tempfile::TempDir; + use vm::{ test_utils::{ get_create_zksync_address, get_deploy_tx, mock_loadnext_gas_burn_call, @@ -19,10 +14,11 @@ use vm::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }, }; -use zksync_config::ZkSyncConfig; +use zksync_config::configs::chain::StateKeeperConfig; + use zksync_contracts::{get_loadnext_contract, TestContract}; use zksync_dal::ConnectionPool; -use zksync_storage::{db::Database, RocksDB}; +use zksync_state::RocksdbStorage; use zksync_types::{ ethabi::{encode, Token}, fee::Fee, @@ -35,6 +31,13 @@ use zksync_types::{ }; use zksync_utils::{test_utils::LoadnextContractExecutionParams, u256_to_h256}; +use crate::genesis::create_genesis_block; +use crate::state_keeper::{ + batch_executor::BatchExecutorHandle, + io::L1BatchParams, + tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, +}; + const DEFAULT_GAS_PER_PUBDATA: u32 = 100; const CHAIN_ID: L2ChainId = L2ChainId(270); @@ -51,16 +54,13 @@ pub(super) struct TestConfig { impl TestConfig { pub(super) fn new() -> Self { // It's OK to use env config here, since we would load the postgres URL from there anyway. - let config = ZkSyncConfig::from_env(); + let config = StateKeeperConfig::from_env(); Self { vm_gas_limit: None, save_call_traces: false, - max_allowed_tx_gas_limit: config.chain.state_keeper.max_allowed_l2_tx_gas_limit, - validation_computational_gas_limit: config - .chain - .state_keeper - .validation_computational_gas_limit, + max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit, + validation_computational_gas_limit: config.validation_computational_gas_limit, } } } @@ -95,19 +95,14 @@ impl Tester { /// Creates a batch executor instance. /// This function intentionally uses sensible defaults to not introduce boilerplate. - pub(super) fn create_batch_executor(&self) -> BatchExecutorHandle { + pub(super) async fn create_batch_executor(&self) -> BatchExecutorHandle { // Not really important for the batch executor - it operates over a single batch. 
let (block_context, block_properties) = self.batch_params(L1BatchNumber(1), 100); - let secondary_storage = self - .pool - .access_storage_blocking() - .storage_load_dal() - .load_secondary_storage(RocksDB::new( - Database::StateKeeper, - self.db_dir.path().to_str().unwrap(), - true, - )); + let mut secondary_storage = RocksdbStorage::new(self.db_dir.path()); + let mut conn = self.pool.access_storage_tagged("state_keeper").await; + secondary_storage.update_from_postgres(&mut conn).await; + drop(conn); // We don't use the builder because it would require us to clone the `ConnectionPool`, which is forbidden // for the test pool (see the doc-comment on `TestPool` for details). @@ -153,22 +148,23 @@ impl Tester { } /// Performs the genesis in the storage. - pub(super) fn genesis(&self) { - let mut storage = self.pool.access_storage_blocking(); - if storage.blocks_dal().is_genesis_needed() { + pub(super) async fn genesis(&self) { + let mut storage = self.pool.access_storage_tagged("state_keeper").await; + if storage.blocks_dal().is_genesis_needed().await { create_genesis_block( &mut storage, self.fee_account, CHAIN_ID, BASE_SYSTEM_CONTRACTS.clone(), - ); + ) + .await; } } /// Adds funds for specified account list. /// Expects genesis to be performed (i.e. `setup_storage` called beforehand). - pub(super) fn fund(&self, addresses: &[Address]) { - let mut storage = self.pool.access_storage_blocking(); + pub(super) async fn fund(&self, addresses: &[Address]) { + let mut storage = self.pool.access_storage_tagged("state_keeper").await; let eth_amount = U256::from(10u32).pow(U256::from(32)); //10^32 wei @@ -182,10 +178,12 @@ impl Tester { storage .storage_logs_dal() - .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), storage_logs.clone())]); + .append_storage_logs(MiniblockNumber(0), &[(H256::zero(), storage_logs.clone())]) + .await; storage .storage_dal() - .apply_storage_logs(&[(H256::zero(), storage_logs)]); + .apply_storage_logs(&[(H256::zero(), storage_logs)]) + .await; } } } diff --git a/core/bin/zksync_core/src/state_keeper/extractors.rs b/core/bin/zksync_core/src/state_keeper/extractors.rs index 7f7587b91c6b..5fe1c4e90dba 100644 --- a/core/bin/zksync_core/src/state_keeper/extractors.rs +++ b/core/bin/zksync_core/src/state_keeper/extractors.rs @@ -1,248 +1,82 @@ -//! Pure functions that convert blocks/transactions data as required by the state keeper. +//! Pure functions that convert data as required by the state keeper. 
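// The tester hunk above replaces the old `load_secondary_storage` call with a `RocksdbStorage`
// cache hydrated from Postgres. Condensed into a standalone sketch (only the calls visible in
// this patch are assumed; error handling is elided):
async fn hydrated_storage(pool: &ConnectionPool, path: &std::path::Path) -> RocksdbStorage {
    let mut storage = RocksdbStorage::new(path);
    let mut conn = pool.access_storage_tagged("state_keeper").await;
    // Replays committed state from Postgres into the local RocksDB cache, so the batch
    // executor sees the same storage the server would.
    storage.update_from_postgres(&mut conn).await;
    storage
}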
-use itertools::Itertools; -use std::collections::HashMap; -use std::time::{Duration, Instant}; +use chrono::{DateTime, TimeZone, Utc}; -use vm::vm_with_bootloader::{get_bootloader_memory, BlockContextMode, TxExecutionMode}; -use vm::zk_evm::aux_structures::LogQuery; -use zksync_dal::StorageProcessor; -use zksync_types::block::DeployedContract; -use zksync_types::tx::{IncludedTxLocation, TransactionExecutionResult}; -use zksync_types::{ - l2_to_l1_log::L2ToL1Log, log::StorageLogKind, AccountTreeId, Address, ExecuteTransactionCommon, - L1BatchNumber, StorageKey, StorageLog, StorageLogQuery, StorageValue, VmEvent, - ACCOUNT_CODE_STORAGE_ADDRESS, H256, U256, +use std::{ + convert::TryFrom, + fmt, + time::{Duration, Instant}, }; -use zksync_utils::{h256_to_account_address, h256_to_u256}; - -use super::updates::{L1BatchUpdates, UpdatesManager}; - -/// Storage logs grouped by transaction hash -type StorageLogs = Vec<(H256, Vec<StorageLog>)>; - -pub(crate) fn log_queries_to_storage_logs( - log_queries: &[StorageLogQuery], - updates_manager: &UpdatesManager, - is_fictive_miniblock: bool, -) -> StorageLogs { - log_queries - .iter() - .group_by(|log| log.log_query.tx_number_in_block) - .into_iter() - .map(|(tx_index, logs)| { - let tx_hash = if is_fictive_miniblock { - assert_eq!( - tx_index as usize, - updates_manager.pending_executed_transactions_len() - ); - H256::zero() - } else { - updates_manager.get_tx_by_index(tx_index as usize).hash() - }; - - ( - tx_hash, - logs.map(StorageLog::from_log_query) - .collect::<Vec<_>>(), - ) - }) - .collect() -} - -pub(crate) fn write_logs_from_storage_logs(storage_logs: StorageLogs) -> StorageLogs { - storage_logs - .into_iter() - .map(|(hash, mut logs)| { - logs.retain(|log| log.kind == StorageLogKind::Write); - (hash, logs) - }) - .collect() -} - -pub(crate) fn extract_events_this_block( - vm_events: &[VmEvent], - updates_manager: &UpdatesManager, - is_fictive_miniblock: bool, -) -> Vec<(IncludedTxLocation, Vec<VmEvent>)> { - vm_events - .iter() - .group_by(|event| event.location.1) - .into_iter() - .map(|(tx_index, events)| { - let (tx_hash, tx_initiator_address) = if is_fictive_miniblock { - assert_eq!( - tx_index as usize, - updates_manager.pending_executed_transactions_len() - ); - (H256::zero(), Address::zero()) - } else { - let tx = updates_manager.get_tx_by_index(tx_index as usize); - (tx.hash(), tx.initiator_account()) - }; - - ( - IncludedTxLocation { - tx_hash, - tx_index_in_miniblock: tx_index - - updates_manager.l1_batch.executed_transactions.len() as u32, - tx_initiator_address, - }, - events.cloned().collect::<Vec<_>>(), - ) - }) - .collect() -} - -pub(crate) fn extract_l2_to_l1_logs_this_block( - l2_to_l1_logs: &[L2ToL1Log], - updates_manager: &UpdatesManager, - is_fictive_miniblock: bool, -) -> Vec<(IncludedTxLocation, Vec<L2ToL1Log>)> { - l2_to_l1_logs - .iter() - .group_by(|log| log.tx_number_in_block) - .into_iter() - .map(|(tx_index, l2_to_l1_logs)| { - let (tx_hash, tx_initiator_address) = if is_fictive_miniblock { - assert_eq!( - tx_index as usize, - updates_manager.pending_executed_transactions_len() - ); - (H256::zero(), Address::zero()) - } else { - let tx = updates_manager.get_tx_by_index(tx_index as usize); - (tx.hash(), tx.initiator_account()) - }; - - ( - IncludedTxLocation { - tx_hash, - tx_index_in_miniblock: tx_index as u32 - - updates_manager.l1_batch.executed_transactions.len() as u32, - tx_initiator_address, - }, - l2_to_l1_logs.cloned().collect::<Vec<_>>(), - ) - }) - .collect() -} - -pub(crate) fn l1_l2_tx_count( - executed_transactions: &[TransactionExecutionResult], -)
-> (usize, usize) { - let (l1_txs, l2_txs): ( - Vec<&TransactionExecutionResult>, - Vec<&TransactionExecutionResult>, - ) = executed_transactions - .iter() - .partition(|t| matches!(t.transaction.common_data, ExecuteTransactionCommon::L1(_))); - (l1_txs.len(), l2_txs.len()) -} - -pub(crate) fn get_initial_bootloader_memory( - updates_accumulator: &L1BatchUpdates, - block_context: BlockContextMode, -) -> Vec<(usize, U256)> { - let transactions_data = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.transaction.clone().into()) - .collect(); - - let refunds = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.operator_suggested_refund) - .collect(); - let compressed_bytecodes = updates_accumulator - .executed_transactions - .iter() - .map(|res| res.compressed_bytecodes.clone()) - .collect(); - - get_bootloader_memory( - transactions_data, - refunds, - compressed_bytecodes, - TxExecutionMode::VerifyExecute, - block_context, - ) -} - -pub(crate) fn storage_log_query_write_read_counts(logs: &[StorageLogQuery]) -> (usize, usize) { - let (reads, writes): (Vec<&StorageLogQuery>, Vec<&StorageLogQuery>) = - logs.iter().partition(|l| l.log_query.rw_flag); - (reads.len(), writes.len()) -} - -pub(crate) fn log_query_write_read_counts(logs: &[LogQuery]) -> (usize, usize) { - let (reads, writes): (Vec<&LogQuery>, Vec<&LogQuery>) = logs.iter().partition(|l| l.rw_flag); - (reads.len(), writes.len()) -} - -pub(crate) fn contracts_deployed_this_miniblock( - unique_storage_updates: Vec<(StorageKey, (H256, StorageValue))>, - storage: &mut StorageProcessor<'_>, -) -> Vec<(H256, Vec<DeployedContract>)> { - let mut result: HashMap<H256, Vec<DeployedContract>> = Default::default(); +use vm::transaction_data::TransactionData; +use zksync_dal::StorageProcessor; +use zksync_types::{L1BatchNumber, Transaction, U256}; +use zksync_utils::h256_to_u256; + +/// Displays a Unix timestamp (seconds since epoch) in human-readable form. Useful for logging.
+pub(super) fn display_timestamp(timestamp: u64) -> impl fmt::Display { + enum DisplayedTimestamp { + Parsed(DateTime<Utc>), + Raw(u64), + } - // Each storage update in the AccountCodeStorage denotes the fact - // some contract bytecode has been deployed - unique_storage_updates - .into_iter() - .filter(|(key, _)| *key.account().address() == ACCOUNT_CODE_STORAGE_ADDRESS) - .for_each(|(code_key, (tx_hash, bytecode_hash))| { - if bytecode_hash == H256::zero() { - return; + impl fmt::Display for DisplayedTimestamp { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Parsed(timestamp) => fmt::Display::fmt(timestamp, formatter), + Self::Raw(raw) => write!(formatter, "(raw: {raw})"), } + } + } - let contract_bytecode = storage - .storage_dal() - .get_factory_dep(bytecode_hash) - .expect("Missing factory dep for deployed contract"); - - let contracts_in_tx = result.entry(tx_hash).or_insert_with(Default::default); - contracts_in_tx.push(DeployedContract { - account_id: AccountTreeId::new(h256_to_account_address(code_key.key())), - bytecode: contract_bytecode, - }); - }); - - result.into_iter().collect() + let parsed = i64::try_from(timestamp).ok(); + let parsed = parsed.and_then(|ts| Utc.timestamp_opt(ts, 0).single()); + parsed.map_or( + DisplayedTimestamp::Raw(timestamp), + DisplayedTimestamp::Parsed, + ) } -pub(crate) fn wait_for_prev_l1_batch_state_root_unchecked( +pub(crate) async fn wait_for_prev_l1_batch_params( storage: &mut StorageProcessor<'_>, number: L1BatchNumber, -) -> U256 { +) -> (U256, u64) { if number == L1BatchNumber(0) { - return U256::default(); + return (U256::default(), 0); } - wait_for_l1_batch_state_root_unchecked(storage, number - 1) + wait_for_l1_batch_params_unchecked(storage, number - 1).await } -// warning: if invoked for a `L1BatchNumber` of a non-existent l1 batch, will block current thread indefinitely -pub(crate) fn wait_for_l1_batch_state_root_unchecked( +/// # Warning +/// +/// If invoked for a `L1BatchNumber` of a non-existent l1 batch, will block current thread indefinitely. +async fn wait_for_l1_batch_params_unchecked( storage: &mut StorageProcessor<'_>, number: L1BatchNumber, -) -> U256 { +) -> (U256, u64) { // If the state root is not known yet, this duration will be used to back off in the while loops const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100); let stage_started_at: Instant = Instant::now(); loop { - let root_hash = storage.blocks_dal().get_block_state_root(number); - if let Some(root) = root_hash { + let data = storage + .blocks_dal() + .get_block_state_root_and_timestamp(number) + .await; + if let Some((root_hash, timestamp)) = data { vlog::trace!( - "Waited for hash of block #{:?} took {:?}", - number.0, + "Waiting for hash of L1 batch #{number} took {:?}", stage_started_at.elapsed() ); - return h256_to_u256(root); + return (h256_to_u256(root_hash), timestamp); } - std::thread::sleep(SAFE_STATE_ROOT_INTERVAL); + tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await; } } + +/// Returns size in VM words of an encoded transaction.
+pub(super) fn encoded_transaction_size(tx: Transaction) -> usize { + let tx_data: TransactionData = tx.into(); + tx_data.into_tokens().len() +} diff --git a/core/bin/zksync_core/src/state_keeper/io/common.rs b/core/bin/zksync_core/src/state_keeper/io/common.rs index a725d05ec570..b3dc7a16c005 100644 --- a/core/bin/zksync_core/src/state_keeper/io/common.rs +++ b/core/bin/zksync_core/src/state_keeper/io/common.rs @@ -1,4 +1,4 @@ -use std::time::{Duration, Instant}; +use std::time::Duration; use vm::{ vm_with_bootloader::{BlockContext, BlockContextMode}, @@ -9,14 +9,8 @@ use zksync_dal::StorageProcessor; use zksync_types::{Address, L1BatchNumber, U256, ZKPORTER_IS_AVAILABLE}; use zksync_utils::h256_to_u256; -use crate::state_keeper::extractors; - use super::{L1BatchParams, PendingBatchData}; - -#[derive(Debug)] -pub(crate) struct StateKeeperStats { - pub(crate) num_contracts: u64, -} +use crate::state_keeper::extractors; /// Returns the parameters required to initialize the VM for the next L1 batch. pub(crate) fn l1_batch_params( @@ -48,26 +42,17 @@ pub(crate) fn l1_batch_params( } } -/// Runs the provided closure `f` until it returns `Some` or the `max_wait` time has elapsed. -pub(crate) fn poll_until<T, F: FnMut() -> Option<T>>( - delay_interval: Duration, - max_wait: Duration, - mut f: F, -) -> Option<T> { - let wait_interval = delay_interval.min(max_wait); - let start = Instant::now(); - while start.elapsed() <= max_wait { - let res = f(); - if res.is_some() { - return res; - } - std::thread::sleep(wait_interval); - } - None +/// Returns the number of iterations `delay_interval` fits into `max_wait`, rounding up. +pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize { + let max_wait_millis = max_wait.as_millis() as u64; + let delay_interval_millis = delay_interval.as_millis() as u64; + assert!(delay_interval_millis > 0, "delay interval must be positive"); + + ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize } /// Loads the pending L1 block data from the database.
-pub(crate) fn load_pending_batch( +pub(crate) async fn load_pending_batch( storage: &mut StorageProcessor<'_>, current_l1_batch_number: L1BatchNumber, fee_account: Address, @@ -78,25 +63,30 @@ pub(crate) fn load_pending_batch( let (_, last_miniblock_number_included_in_l1_batch) = storage .blocks_dal() .get_miniblock_range_of_l1_batch(current_l1_batch_number - 1) + .await .unwrap(); last_miniblock_number_included_in_l1_batch + 1 }; let pending_miniblock_header = storage .blocks_dal() - .get_miniblock_header(pending_miniblock_number)?; + .get_miniblock_header(pending_miniblock_number) + .await?; vlog::info!("Getting previous batch hash"); - let previous_l1_batch_hash = - extractors::wait_for_prev_l1_batch_state_root_unchecked(storage, current_l1_batch_number); + let (previous_l1_batch_hash, _) = + extractors::wait_for_prev_l1_batch_params(storage, current_l1_batch_number).await; - let base_system_contracts = storage.storage_dal().get_base_system_contracts( - pending_miniblock_header - .base_system_contracts_hashes - .bootloader, - pending_miniblock_header - .base_system_contracts_hashes - .default_aa, - ); + let base_system_contracts = storage + .storage_dal() + .get_base_system_contracts( + pending_miniblock_header + .base_system_contracts_hashes + .bootloader, + pending_miniblock_header + .base_system_contracts_hashes + .default_aa, + ) + .await; vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); let params = l1_batch_params( @@ -109,7 +99,25 @@ pub(crate) fn load_pending_batch( base_system_contracts, ); - let txs = storage.transactions_dal().get_transactions_to_reexecute(); + let txs = storage + .transactions_dal() + .get_transactions_to_reexecute() + .await; Some(PendingBatchData { params, txs }) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[rustfmt::skip] // One-line formatting looks better here. 
+ fn test_poll_iters() { + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(0)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(100)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(101)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); + } +} diff --git a/core/bin/zksync_core/src/state_keeper/io/mempool.rs b/core/bin/zksync_core/src/state_keeper/io/mempool.rs index 351ef5bbd621..8fc0efbe9410 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mempool.rs @@ -1,4 +1,8 @@ +use async_trait::async_trait; + use std::{ + cmp, + collections::HashMap, sync::Arc, time::{Duration, Instant}, }; @@ -7,29 +11,27 @@ use vm::{ vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, DerivedBlockContext}, VmBlockResult, }; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_config::configs::chain::StateKeeperConfig; +use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, U256}; use zksync_utils::time::millis_since_epoch; -use crate::state_keeper::mempool_actor::l2_tx_filter; use crate::{ l1_gas_price::L1GasPriceProvider, state_keeper::{ extractors, io::{ - common::{l1_batch_params, poll_until, StateKeeperStats}, - seal_logic::{seal_l1_batch_impl, seal_miniblock_impl}, - L1BatchParams, PendingBatchData, StateKeeperIO, + common::{l1_batch_params, load_pending_batch, poll_iters}, + L1BatchParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, + mempool_actor::l2_tx_filter, updates::UpdatesManager, MempoolGuard, }, }; -use super::common::load_pending_batch; - /// Mempool-based IO for the state keeper. /// Receives transactions from the database through the mempool filtering logic. /// Decides which batch parameters should be used for the new batch. @@ -40,23 +42,19 @@ pub(crate) struct MempoolIO<G> { pool: ConnectionPool, filter: L2TxFilter, current_miniblock_number: MiniblockNumber, + miniblock_sealer_handle: MiniblockSealerHandle, current_l1_batch_number: L1BatchNumber, fee_account: Address, fair_l2_gas_price: u64, delay_interval: Duration, - - // Grafana metrics - statistics: StateKeeperStats, - // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. l1_gas_price_provider: Arc<G>, - base_system_contracts: BaseSystemContracts, + l2_erc20_bridge_addr: Address, } -impl<G: L1GasPriceProvider + 'static + std::fmt::Debug + Send + Sync> StateKeeperIO - for MempoolIO<G> -{ +#[async_trait] +impl<G: L1GasPriceProvider + 'static + Send + Sync> StateKeeperIO for MempoolIO<G> { fn current_l1_batch_number(&self) -> L1BatchNumber { self.current_l1_batch_number } @@ -65,11 +63,12 @@ impl StateKeepe self.current_miniblock_number } - fn load_pending_batch(&mut self) -> Option<PendingBatchData> { - let mut storage = self.pool.access_storage_blocking(); + async fn load_pending_batch(&mut self) -> Option<PendingBatchData> { + let mut storage = self.pool.access_storage_tagged("state_keeper").await; let PendingBatchData { params, txs } = - load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account)?; + load_pending_batch(&mut storage, self.current_l1_batch_number, self.fee_account) + .await?; // Initialize the filter for the transactions that come after the pending batch.
// We use values from the pending block to match the filter with one used before the restart. let context = params.context_mode.inner_block_context().context; @@ -84,76 +83,79 @@ impl StateKeepe Some(PendingBatchData { params, txs }) } - fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams> { + async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams> { + let deadline = Instant::now() + max_wait; + // Block until at least one transaction in the mempool can match the filter (or timeout happens). // This is needed to ensure that block timestamp is not too old. - poll_until(self.delay_interval, max_wait, || { + for _ in 0..poll_iters(self.delay_interval, max_wait) { // We create a new filter each time, since parameters may change and a previously // ignored transaction in the mempool may be scheduled for the execution. self.filter = l2_tx_filter(self.l1_gas_price_provider.as_ref(), self.fair_l2_gas_price); - self.mempool.has_next(&self.filter).then(|| { - // We only need to get the root hash when we're certain that we have a new transaction. - vlog::info!("getting previous block hash"); - let previous_l1_batch_hash = { - let mut storage = self.pool.access_storage_blocking(); - - let stage_started_at: Instant = Instant::now(); - let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( - &mut storage, - self.current_l1_batch_number, - ); - metrics::histogram!( - "server.state_keeper.wait_for_prev_hash_time", - stage_started_at.elapsed() - ); - hash - }; - vlog::info!("previous_l1_batch_hash: {}", previous_l1_batch_hash); - vlog::info!( - "(l1_gas_price,fair_l2_gas_price) for block {} is ({},{})", - self.current_l1_batch_number.0, - self.filter.l1_gas_price, - self.fair_l2_gas_price - ); - - l1_batch_params( - self.current_l1_batch_number, - self.fee_account, - (millis_since_epoch() / 1000) as u64, - previous_l1_batch_hash, - self.filter.l1_gas_price, - self.fair_l2_gas_price, - self.base_system_contracts.clone(), - ) - }) - }) + // We only need to get the root hash when we're certain that we have a new transaction. + if !self.mempool.has_next(&self.filter) { + tokio::time::sleep(self.delay_interval).await; + continue; + } + + let (prev_hash, prev_timestamp) = self.load_previous_l1_batch_params().await; + // We cannot create two L1 batches with the same timestamp (forbidden by the bootloader). + // Hence, we wait until the current timestamp is larger. We can use `timeout_at` + // since `sleep_past` is cancel-safe; it only uses `sleep()` async calls.
+ let current_timestamp = + tokio::time::timeout_at(deadline.into(), sleep_past(prev_timestamp)); + let current_timestamp = current_timestamp.await.ok()?; + + vlog::info!( + "(l1_gas_price, fair_l2_gas_price) for L1 batch #{} is ({}, {})", + self.current_l1_batch_number.0, + self.filter.l1_gas_price, + self.fair_l2_gas_price + ); + return Some(l1_batch_params( + self.current_l1_batch_number, + self.fee_account, + current_timestamp, + prev_hash, + self.filter.l1_gas_price, + self.fair_l2_gas_price, + self.base_system_contracts.clone(), + )); + } + None } - fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option<u64> { + async fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option<u64> { let new_miniblock_timestamp = (millis_since_epoch() / 1000) as u64; Some(new_miniblock_timestamp) } - fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> { - poll_until(self.delay_interval, max_wait, || { + async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> { + for _ in 0..poll_iters(self.delay_interval, max_wait) { let started_at = Instant::now(); let res = self.mempool.next_transaction(&self.filter); metrics::histogram!( "server.state_keeper.get_tx_from_mempool", started_at.elapsed(), ); - res - }) + if let Some(res) = res { + return Some(res); + } else { + tokio::time::sleep(self.delay_interval).await; + continue; + } + } + None } - fn rollback(&mut self, tx: &Transaction) { + async fn rollback(&mut self, tx: Transaction) { // Reset nonces in the mempool. - self.mempool.rollback(tx); + self.mempool.rollback(&tx); // Insert the transaction back. - self.mempool.insert(vec![tx.clone()], Default::default()); + self.mempool.insert(vec![tx], HashMap::new()); } - fn reject(&mut self, rejected: &Transaction, error: &str) { + async fn reject(&mut self, rejected: &Transaction, error: &str) { assert!( !rejected.is_l1(), "L1 transactions should not be rejected: {}", @@ -164,7 +166,7 @@ impl StateKeepe self.mempool.rollback(rejected); // Mark tx as rejected in the storage. - let mut storage = self.pool.access_storage_blocking(); + let mut storage = self.pool.access_storage_tagged("state_keeper").await; metrics::increment_counter!("server.state_keeper.rejected_transactions"); vlog::warn!( "transaction {} is rejected with error {}", @@ -173,24 +175,21 @@ impl StateKeepe ); storage .transactions_dal() - .mark_tx_as_rejected(rejected.hash(), &format!("rejected: {}", error)); + .mark_tx_as_rejected(rejected.hash(), &format!("rejected: {}", error)) + .await; } - fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { - let pool = self.pool.clone(); - let mut storage = pool.access_storage_blocking(); - seal_miniblock_impl( - self.current_miniblock_number, + async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { + let command = updates_manager.seal_miniblock_command( self.current_l1_batch_number, - &mut self.statistics, - &mut storage, - updates_manager, - false, + self.current_miniblock_number, + self.l2_erc20_bridge_addr, ); + self.miniblock_sealer_handle.submit(command).await; self.current_miniblock_number += 1; } - fn seal_l1_batch( + async fn seal_l1_batch( &mut self, block_result: VmBlockResult, updates_manager: UpdatesManager, @@ -202,64 +201,176 @@ impl StateKeepe "Batch timestamps don't match, batch number {}", self.current_l1_batch_number() ); + + // We cannot start sealing an L1 batch until we've sealed all miniblocks included in it.
+ self.miniblock_sealer_handle.wait_for_all_commands().await; + let pool = self.pool.clone(); - let mut storage = pool.access_storage_blocking(); - seal_l1_batch_impl( - self.current_miniblock_number, - self.current_l1_batch_number, - &mut self.statistics, - &mut storage, - block_result, - updates_manager, - block_context, - ); + let mut storage = pool.access_storage_tagged("state_keeper").await; + updates_manager + .seal_l1_batch( + &mut storage, + self.current_miniblock_number, + self.current_l1_batch_number, + block_result, + block_context, + self.l2_erc20_bridge_addr, + ) + .await; self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. self.current_l1_batch_number += 1; } } +/// Sleeps until the current timestamp is larger than the provided `timestamp`. +/// +/// Returns the current timestamp after the sleep. It is guaranteed to be larger than `timestamp`. +async fn sleep_past(timestamp: u64) -> u64 { + let mut current_timestamp_millis = millis_since_epoch(); + let mut current_timestamp = (current_timestamp_millis / 1_000) as u64; + match timestamp.cmp(&current_timestamp) { + cmp::Ordering::Less => return current_timestamp, + cmp::Ordering::Equal => { + vlog::info!( + "Current timestamp {} is equal to previous L1 batch timestamp; waiting until \ + timestamp increases", + extractors::display_timestamp(current_timestamp) + ); + } + cmp::Ordering::Greater => { + // This situation can be triggered if the state keeper is started on a pod with a different + // system time, or if it is buggy. Thus, a one-time error could require no actions if L1 batches + // are expected to be generated frequently. + vlog::error!( + "Previous L1 batch timestamp {} is larger than the current timestamp {}", + extractors::display_timestamp(timestamp), + extractors::display_timestamp(current_timestamp) + ); + } + } + + // This loop should normally run once, since `tokio::time::sleep` sleeps *at least* the specified duration. + // The logic is organized in a loop for marginal cases, such as the system time getting changed during `sleep()`. + loop { + // Time to catch up to `timestamp`; panic / underflow on subtraction is never triggered + // since we've ensured that `timestamp >= current_timestamp`. + let wait_seconds = timestamp - current_timestamp; + // Time to wait until the current timestamp increases.
+ let wait_millis = 1_001 - (current_timestamp_millis % 1_000) as u64; + let wait = Duration::from_millis(wait_millis + wait_seconds * 1_000); + + tokio::time::sleep(wait).await; + current_timestamp_millis = millis_since_epoch(); + current_timestamp = (current_timestamp_millis / 1_000) as u64; + + if current_timestamp > timestamp { + return current_timestamp; + } + } +} + impl<G: L1GasPriceProvider> MempoolIO<G> { - pub(crate) fn new( + pub(in crate::state_keeper) async fn new( mempool: MempoolGuard, + miniblock_sealer_handle: MiniblockSealerHandle, + l1_gas_price_provider: Arc<G>, pool: ConnectionPool, - fee_account: Address, - fair_l2_gas_price: u64, + config: &StateKeeperConfig, delay_interval: Duration, - l1_gas_price_provider: Arc<G>, - base_system_contracts_hashes: BaseSystemContractsHashes, + l2_erc20_bridge_addr: Address, ) -> Self { - let mut storage = pool.access_storage_blocking(); - let last_sealed_block_header = storage.blocks_dal().get_newest_block_header(); - let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number(); - let num_contracts = storage.storage_load_dal().load_number_of_contracts(); - let filter = L2TxFilter::default(); // Will be initialized properly on the first newly opened batch. - - let base_system_contracts = storage.storage_dal().get_base_system_contracts( - base_system_contracts_hashes.bootloader, - base_system_contracts_hashes.default_aa, - ); + let mut storage = pool.access_storage_tagged("state_keeper").await; + let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await; + let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await; + let base_system_contracts = storage + .storage_dal() + .get_base_system_contracts(config.bootloader_hash, config.default_aa_hash) + .await; drop(storage); Self { mempool, pool, - filter, + filter: L2TxFilter::default(), + // ^ Will be initialized properly on the first newly opened batch current_l1_batch_number: last_sealed_block_header.number + 1, + miniblock_sealer_handle, current_miniblock_number: last_miniblock_number + 1, - fee_account, - fair_l2_gas_price, + fee_account: config.fee_account_addr, + fair_l2_gas_price: config.fair_l2_gas_price, delay_interval, - statistics: StateKeeperStats { num_contracts }, l1_gas_price_provider, base_system_contracts, + l2_erc20_bridge_addr, } } + + async fn load_previous_l1_batch_params(&self) -> (U256, u64) { + vlog::info!("Getting previous L1 batch hash"); + let stage_started_at: Instant = Instant::now(); + + let mut storage = self.pool.access_storage_tagged("state_keeper").await; + let (batch_hash, batch_timestamp) = + extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number) + .await; + + metrics::histogram!( + "server.state_keeper.wait_for_prev_hash_time", + stage_started_at.elapsed() + ); + vlog::info!( + "Got previous L1 batch hash: {batch_hash} and timestamp: {}", + extractors::display_timestamp(batch_timestamp) + ); + (batch_hash, batch_timestamp) + } } -/// Getters reqiored for testing the MempoolIO. +/// Getters required for testing the MempoolIO. #[cfg(test)] impl<G: L1GasPriceProvider> MempoolIO<G> { pub(super) fn filter(&self) -> &L2TxFilter { &self.filter } } + +#[cfg(test)] +mod tests { + use tokio::time::timeout_at; + + use zksync_utils::time::seconds_since_epoch; + + use super::*; + + // This test defensively uses large deadlines in order to account for tests running in parallel etc.
+ #[tokio::test] + async fn sleeping_past_timestamp() { + let past_timestamps = [0, 1_000, 1_000_000_000, seconds_since_epoch() - 10]; + for timestamp in past_timestamps { + let deadline = Instant::now() + Duration::from_secs(1); + timeout_at(deadline.into(), sleep_past(timestamp)) + .await + .unwrap(); + } + + let current_timestamp = seconds_since_epoch(); + let deadline = Instant::now() + Duration::from_secs(2); + let ts = timeout_at(deadline.into(), sleep_past(current_timestamp)) + .await + .unwrap(); + assert!(ts > current_timestamp); + + let future_timestamp = seconds_since_epoch() + 1; + let deadline = Instant::now() + Duration::from_secs(3); + let ts = timeout_at(deadline.into(), sleep_past(future_timestamp)) + .await + .unwrap(); + assert!(ts > future_timestamp); + + let future_timestamp = seconds_since_epoch() + 1; + let deadline = Instant::now() + Duration::from_millis(100); + // ^ This deadline is too small (we need at least 1_000ms) + let result = timeout_at(deadline.into(), sleep_past(future_timestamp)).await; + assert!(result.is_err()); + } +} diff --git a/core/bin/zksync_core/src/state_keeper/io/mod.rs b/core/bin/zksync_core/src/state_keeper/io/mod.rs index a8eb6ac6ffb8..359d6738aa8f 100644 --- a/core/bin/zksync_core/src/state_keeper/io/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/io/mod.rs @@ -1,20 +1,26 @@ -use std::time::Duration; +use async_trait::async_trait; +use tokio::sync::{mpsc, oneshot}; -use vm::vm_with_bootloader::BlockContextMode; -use vm::vm_with_bootloader::DerivedBlockContext; +use std::{ + fmt, + time::{Duration, Instant}, +}; + +use vm::vm_with_bootloader::{BlockContextMode, DerivedBlockContext}; use vm::zk_evm::block_properties::BlockProperties; use vm::VmBlockResult; -use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction}; - -use super::updates::UpdatesManager; - -pub(crate) use mempool::MempoolIO; use zksync_contracts::BaseSystemContracts; +use zksync_dal::ConnectionPool; +use zksync_types::{L1BatchNumber, MiniblockNumber, Transaction}; pub(crate) mod common; pub(crate) mod mempool; pub(crate) mod seal_logic; +pub(crate) use self::mempool::MempoolIO; + +use super::updates::{MiniblockSealCommand, UpdatesManager}; + #[cfg(test)] mod tests; @@ -48,35 +54,239 @@ pub struct PendingBatchData { /// `StateKeeperIO` provides the interactive layer for the state keeper: /// it's used to receive volatile parameters (such as batch parameters), and also it's used to perform /// mutable operations on the persistent state (e.g. persist executed batches). -pub trait StateKeeperIO: 'static + std::fmt::Debug + Send { +#[async_trait] +pub trait StateKeeperIO: 'static + Send { /// Returns the number of the currently processed L1 batch. fn current_l1_batch_number(&self) -> L1BatchNumber; /// Returns the number of the currently processed miniblock (aka L2 block). fn current_miniblock_number(&self) -> MiniblockNumber; /// Returns the data on the batch that was not sealed before the server restart. /// See `PendingBatchData` doc-comment for details. - fn load_pending_batch(&mut self) -> Option<PendingBatchData>; + async fn load_pending_batch(&mut self) -> Option<PendingBatchData>; /// Blocks for up to `max_wait` until the parameters for the next L1 batch are available. /// Returns the data required to initialize the VM for the next batch. - fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams>; + async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams>; /// Blocks for up to `max_wait` until the parameters for the next miniblock are available.
/// Right now it's only a timestamp. - fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option<u64>; + async fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option<u64>; /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available before the timeout. - fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction>; + async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction>; /// Marks the transaction as "not executed", so it can be retrieved from the IO again. - fn rollback(&mut self, tx: &Transaction); + async fn rollback(&mut self, tx: Transaction); /// Marks the transaction as "rejected", e.g. one that is not correct and can't be executed. - fn reject(&mut self, tx: &Transaction, error: &str); + async fn reject(&mut self, tx: &Transaction, error: &str); /// Marks the miniblock (aka L2 block) as sealed. /// Returns the timestamp for the next miniblock. - fn seal_miniblock(&mut self, updates_manager: &UpdatesManager); + async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager); /// Marks the L1 batch as sealed. - fn seal_l1_batch( + async fn seal_l1_batch( &mut self, block_result: VmBlockResult, updates_manager: UpdatesManager, block_context: DerivedBlockContext, ); } + +impl fmt::Debug for dyn StateKeeperIO { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("StateKeeperIO") + .field("current_l1_batch_number", &self.current_l1_batch_number()) + .field("current_miniblock_number", &self.current_miniblock_number()) + .finish() + } +} + +/// A command together with the return address allowing to track command processing completion. +#[derive(Debug)] +struct Completable<T> { + command: T, + completion_sender: oneshot::Sender<()>, +} + +/// Handle for [`MiniblockSealer`] allowing to submit [`MiniblockSealCommand`]s. +#[derive(Debug)] +pub(crate) struct MiniblockSealerHandle { + commands_sender: mpsc::Sender<Completable<MiniblockSealCommand>>, + latest_completion_receiver: Option<oneshot::Receiver<()>>, + // If true, `submit()` will wait for the operation to complete. + is_sync: bool, +} + +impl MiniblockSealerHandle { + const SHUTDOWN_MSG: &'static str = "miniblock sealer unexpectedly shut down"; + + /// Submits a new sealing `command` to the sealer that this handle is attached to. + /// + /// If there are currently too many unprocessed commands, this method will wait until + /// enough of them are processed (i.e., there is backpressure).
+ pub async fn submit(&mut self, command: MiniblockSealCommand) { + let miniblock_number = command.miniblock_number; + vlog::debug!( + "Enqueuing sealing command for miniblock #{miniblock_number} with #{} txs (L1 batch #{})", + command.miniblock.executed_transactions.len(), + command.l1_batch_number + ); + + let start = Instant::now(); + let (completion_sender, completion_receiver) = oneshot::channel(); + self.latest_completion_receiver = Some(completion_receiver); + let command = Completable { + command, + completion_sender, + }; + self.commands_sender + .send(command) + .await + .expect(Self::SHUTDOWN_MSG); + + let elapsed = start.elapsed(); + let queue_capacity = self.commands_sender.capacity(); + vlog::debug!( + "Enqueued sealing command for miniblock #{miniblock_number} (took {elapsed:?}; \ + available queue capacity: {queue_capacity})" + ); + + if self.is_sync { + self.wait_for_all_commands().await; + } else { + metrics::gauge!( + "server.state_keeper.miniblock.seal_queue.capacity", + queue_capacity as f64 + ); + metrics::histogram!( + "server.state_keeper.miniblock.seal_queue.latency", + elapsed, + "stage" => "submit" + ); + } + } + + /// Waits until all previously submitted commands are fully processed by the sealer. + pub async fn wait_for_all_commands(&mut self) { + vlog::debug!( + "Requested waiting for miniblock seal queue to empty; current available capacity: {}", + self.commands_sender.capacity() + ); + + let start = Instant::now(); + let completion_receiver = self.latest_completion_receiver.take(); + if let Some(completion_receiver) = completion_receiver { + completion_receiver.await.expect(Self::SHUTDOWN_MSG); + } + + let elapsed = start.elapsed(); + vlog::debug!("Miniblock seal queue is emptied (took {elapsed:?})"); + + // Since this method called from outside is essentially a no-op if `self.is_sync`, + // we don't report its metrics in this case. + if !self.is_sync { + metrics::histogram!( + "server.state_keeper.miniblock.seal_queue.latency", + elapsed, + "stage" => "wait_for_all_commands" + ); + metrics::gauge!( + "server.state_keeper.miniblock.seal_queue.capacity", + self.commands_sender.capacity() as f64 + ); + } + } +} + +/// Component responsible for sealing miniblocks (i.e., storing their data to Postgres). +#[derive(Debug)] +pub(crate) struct MiniblockSealer { + pool: ConnectionPool, + is_sync: bool, + // Weak sender handle to get queue capacity stats. + commands_sender: mpsc::WeakSender<Completable<MiniblockSealCommand>>, + commands_receiver: mpsc::Receiver<Completable<MiniblockSealCommand>>, +} + +impl MiniblockSealer { + /// Creates a sealer that will use the provided Postgres connection and will have the specified + /// `command_capacity` for unprocessed sealing commands. + pub(crate) fn new( + pool: ConnectionPool, + mut command_capacity: usize, + ) -> (Self, MiniblockSealerHandle) { + let is_sync = command_capacity == 0; + command_capacity = command_capacity.max(1); + + let (commands_sender, commands_receiver) = mpsc::channel(command_capacity); + let this = Self { + pool, + is_sync, + commands_sender: commands_sender.downgrade(), + commands_receiver, + }; + let handle = MiniblockSealerHandle { + commands_sender, + latest_completion_receiver: None, + is_sync, + }; + (this, handle) + } + + /// Seals miniblocks as they are received from the [`MiniblockSealerHandle`]. This should be run + /// on a separate Tokio task.
+ pub async fn run(mut self) { + if self.is_sync { + vlog::info!("Starting synchronous miniblock sealer"); + } else if let Some(sender) = self.commands_sender.upgrade() { + vlog::info!( + "Starting async miniblock sealer with queue capacity {}", + sender.max_capacity() + ); + } else { + vlog::warn!("Miniblock sealer not started, since its handle is already dropped"); + } + + let mut miniblock_seal_delta: Option<Instant> = None; + // Commands must be processed sequentially: a later miniblock cannot be saved before + // an earlier one. + while let Some(completable) = self.next_command().await { + let mut conn = self.pool.access_storage_tagged("state_keeper").await; + completable.command.seal(&mut conn).await; + if let Some(delta) = miniblock_seal_delta { + metrics::histogram!("server.state_keeper.miniblock.seal_delta", delta.elapsed()); + } + miniblock_seal_delta = Some(Instant::now()); + + completable.completion_sender.send(()).ok(); + // ^ We don't care whether anyone listens to the processing progress + } + } + + async fn next_command(&mut self) -> Option<Completable<MiniblockSealCommand>> { + vlog::debug!("Polling miniblock seal queue for next command"); + let start = Instant::now(); + let command = self.commands_receiver.recv().await; + let elapsed = start.elapsed(); + + if let Some(completable) = &command { + vlog::debug!( + "Received command to seal miniblock #{} (polling took {elapsed:?})", + completable.command.miniblock_number + ); + } + + if !self.is_sync { + metrics::histogram!( + "server.state_keeper.miniblock.seal_queue.latency", + elapsed, + "stage" => "next_command" + ); + if let Some(sender) = self.commands_sender.upgrade() { + metrics::gauge!( + "server.state_keeper.miniblock.seal_queue.capacity", + sender.capacity() as f64 + ); + } + } + + command + } +} diff --git a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs index b30b496c40ad..b6c793b9050c 100644 --- a/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/bin/zksync_core/src/state_keeper/io/seal_logic.rs @@ -1,528 +1,640 @@ //! This module is a source-of-truth on what is expected to be done when sealing a block. //! It contains the logic of the block sealing, which is used by both the mempool-based and external node IO.
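// Wiring for the sealer introduced in io/mod.rs above, as a minimal sketch (the queue capacity
// here is an arbitrary assumption; commands come from `updates_manager.seal_miniblock_command(..)`
// as shown in mempool.rs):
async fn spawn_miniblock_sealer(pool: ConnectionPool) -> MiniblockSealerHandle {
    // Capacity 0 makes the sealer synchronous: `submit()` returns only once the miniblock
    // is persisted. Any positive capacity enables backpressured asynchronous sealing.
    let (sealer, handle) = MiniblockSealer::new(pool, 2048);
    tokio::spawn(sealer.run());
    handle
}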
-use std::time::{Duration, Instant}; +use itertools::Itertools; -use vm::vm_with_bootloader::BlockContextMode; -use vm::vm_with_bootloader::DerivedBlockContext; -use vm::VmBlockResult; +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; + +use vm::{ + vm_with_bootloader::{ + get_bootloader_memory, BlockContextMode, DerivedBlockContext, TxExecutionMode, + }, + VmBlockResult, +}; +use zksync_config::constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_dal::StorageProcessor; use zksync_types::{ - block::L1BatchHeader, - block::MiniblockHeader, + block::{L1BatchHeader, MiniblockHeader}, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, + l2_to_l1_log::L2ToL1Log, + tx::{ + tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, + TransactionExecutionResult, + }, + zk_evm::aux_structures::LogQuery, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - L1BatchNumber, MiniblockNumber, + Address, ExecuteTransactionCommon, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, + StorageLogQuery, StorageValue, Transaction, VmEvent, H256, U256, }; use zksync_utils::{miniblock_hash, time::millis_since_epoch}; -use crate::state_keeper::{extractors, io::common::StateKeeperStats, updates::UpdatesManager}; - -/// Persists an L1 batch in the storage. -/// This action includes a creation of an empty "fictive" miniblock that contains the events -/// generated during the bootloader "tip phase". -pub(crate) fn seal_l1_batch_impl( - current_miniblock_number: MiniblockNumber, - current_l1_batch_number: L1BatchNumber, - statistics: &mut StateKeeperStats, - storage: &mut StorageProcessor<'_>, - block_result: VmBlockResult, - mut updates_manager: UpdatesManager, - block_context: DerivedBlockContext, -) { - let started_at = Instant::now(); - let mut stage_started_at: Instant = Instant::now(); - - let mut transaction = storage.start_transaction_blocking(); - - // The vm execution was paused right after the last transaction was executed. - // There is some post-processing work that the VM needs to do before the block is fully processed. - let VmBlockResult { - full_result, - block_tip_result, - } = block_result; - assert!( - full_result.revert_reason.is_none(), - "VM must not revert when finalizing block. Revert reason: {:?}", - full_result.revert_reason - ); - track_l1_batch_execution_stage("vm_finalization", &mut stage_started_at, None); - - updates_manager.extend_from_fictive_transaction(block_tip_result.logs); - // Seal fictive miniblock with last events and storage logs. 
- seal_miniblock_impl( - current_miniblock_number, - current_l1_batch_number, - statistics, - &mut transaction, - &updates_manager, - true, - ); - track_l1_batch_execution_stage("fictive_miniblock", &mut stage_started_at, None); - - let (_, deduped_log_queries) = sort_storage_access_queries( - full_result - .storage_log_queries - .iter() - .map(|log| &log.log_query), - ); - track_l1_batch_execution_stage( - "log_deduplication", - &mut stage_started_at, - Some(deduped_log_queries.len()), - ); - - let (l1_tx_count, l2_tx_count) = - extractors::l1_l2_tx_count(&updates_manager.l1_batch.executed_transactions); - vlog::info!( - "sealing l1 batch {:?} with {:?} ({:?} l2 + {:?} l1) txs, {:?} l2_l1_logs, {:?} events, (writes, reads): {:?} , (writes_dedup, reads_dedup): {:?} ", - current_l1_batch_number, - l1_tx_count + l2_tx_count, - l2_tx_count, - l1_tx_count, - full_result.l2_to_l1_logs.len(), - full_result.events.len(), - extractors::storage_log_query_write_read_counts(&full_result.storage_log_queries), - extractors::log_query_write_read_counts(&deduped_log_queries), - ); - - let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked( - &mut transaction, - current_l1_batch_number, - ); - let block_context_properties = BlockContextMode::NewBlock(block_context, hash); - - let l1_batch = L1BatchHeader { - number: current_l1_batch_number, - is_finished: true, - timestamp: block_context.context.block_timestamp, - fee_account_address: block_context.context.operator_address, - priority_ops_onchain_data: updates_manager.l1_batch.priority_ops_onchain_data.clone(), - l1_tx_count: l1_tx_count as u16, - l2_tx_count: l2_tx_count as u16, - l2_to_l1_logs: full_result.l2_to_l1_logs, - l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), - bloom: Default::default(), - initial_bootloader_contents: extractors::get_initial_bootloader_memory( - &updates_manager.l1_batch, - block_context_properties, - ), - used_contract_hashes: full_result.used_contract_hashes, - base_fee_per_gas: block_context.base_fee, - l1_gas_price: updates_manager.l1_gas_price(), - l2_fair_gas_price: updates_manager.fair_l2_gas_price(), - base_system_contracts_hashes: updates_manager.base_system_contract_hashes(), +use crate::state_keeper::{ + extractors, + updates::{L1BatchUpdates, MiniblockSealCommand, UpdatesManager}, +}; + +#[derive(Debug, Clone, Copy)] +struct SealProgressMetricNames { + target: &'static str, + stage_latency: &'static str, + entity_count: &'static str, + latency_per_unit: &'static str, +} + +impl SealProgressMetricNames { + const L1_BATCH: Self = Self { + target: "L1 batch", + stage_latency: "server.state_keeper.l1_batch.sealed_time_stage", + entity_count: "server.state_keeper.l1_batch.sealed_entity_count", + latency_per_unit: "server.state_keeper.l1_batch.sealed_entity_per_unit", }; - transaction - .blocks_dal() - .insert_l1_batch(l1_batch, updates_manager.l1_batch.l1_gas_count); - track_l1_batch_execution_stage("insert_l1_batch_header", &mut stage_started_at, None); - - transaction - .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(current_l1_batch_number); - track_l1_batch_execution_stage( - "set_l1_batch_number_for_miniblocks", - &mut stage_started_at, - None, - ); - - transaction - .transactions_dal() - .mark_txs_as_executed_in_l1_batch( - current_l1_batch_number, - &updates_manager.l1_batch.executed_transactions, - ); - track_l1_batch_execution_stage( - "mark_txs_as_executed_in_l1_batch", - &mut stage_started_at, - None, - ); - - let (deduplicated_writes, protective_reads): (Vec<_>, 
Vec<_>) = deduped_log_queries - .into_iter() - .partition(|log_query| log_query.rw_flag); - transaction - .storage_logs_dedup_dal() - .insert_protective_reads(current_l1_batch_number, &protective_reads); - track_l1_batch_execution_stage( - "insert_protective_reads", - &mut stage_started_at, - Some(protective_reads.len()), - ); - - transaction - .storage_logs_dedup_dal() - .insert_initial_writes(current_l1_batch_number, &deduplicated_writes); - track_l1_batch_execution_stage( - "insert_initial_writes", - &mut stage_started_at, - Some(deduplicated_writes.len()), - ); - - transaction.commit_blocking(); - track_l1_batch_execution_stage("commit_l1_batch", &mut stage_started_at, None); - - let writes_metrics = updates_manager.storage_writes_deduplicator.metrics(); - // Sanity check. - assert_eq!( - deduplicated_writes.len(), - writes_metrics.initial_storage_writes + writes_metrics.repeated_storage_writes, - "Results of in-flight and common deduplications are mismatched" - ); - metrics::histogram!( - "server.state_keeper.l1_batch.initial_writes", - writes_metrics.initial_storage_writes as f64 - ); - metrics::histogram!( - "server.state_keeper.l1_batch.repeated_writes", - writes_metrics.repeated_storage_writes as f64 - ); - - metrics::histogram!( - "server.state_keeper.l1_batch.transactions_in_l1_batch", - updates_manager.l1_batch.executed_transactions.len() as f64 - ); - metrics::histogram!( - "server.l1_batch.latency", - ((millis_since_epoch() - block_context.context.block_timestamp as u128 * 1000) as f64) / 1000f64, - "stage" => "sealed" - ); - - metrics::histogram!( - "server.state_keeper.l1_batch.sealed_time", - started_at.elapsed(), - ); - vlog::debug!( - "sealed l1 batch {} in {:?}", - current_l1_batch_number, - started_at.elapsed() - ); + const MINIBLOCK: Self = Self { + target: "miniblock", + stage_latency: "server.state_keeper.miniblock.sealed_time_stage", + entity_count: "server.state_keeper.miniblock.sealed_entity_count", + latency_per_unit: "server.state_keeper.miniblock.sealed_entity_per_unit", + }; +} + +/// Tracking progress of L1 batch sealing. 
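+///
+/// A rough usage sketch (stage names as used by `seal_l1_batch` below):
+///
+/// ```ignore
+/// let mut progress = SealProgress::for_l1_batch();
+/// // ... finalize the VM ...
+/// progress.end_stage("vm_finalization", None);
+/// // ... deduplicate storage logs ...
+/// progress.end_stage("log_deduplication", Some(deduped_log_queries.len()));
+/// ```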
+#[derive(Debug)]
+struct SealProgress {
+ metric_names: SealProgressMetricNames,
+ is_fictive: Option<bool>,
+ stage_start: Instant,
+}
+
+impl SealProgress {
+ fn for_l1_batch() -> Self {
+ Self {
+ metric_names: SealProgressMetricNames::L1_BATCH,
+ is_fictive: None,
+ stage_start: Instant::now(),
+ }
+ }
+
+ fn for_miniblock(is_fictive: bool) -> Self {
+ Self {
+ metric_names: SealProgressMetricNames::MINIBLOCK,
+ is_fictive: Some(is_fictive),
+ stage_start: Instant::now(),
+ }
+ }
+
+ fn end_stage(&mut self, stage: &'static str, count: Option<usize>) {
+ const MIN_STAGE_DURATION_TO_REPORT: Duration = Duration::from_millis(10);
+
+ let elapsed = self.stage_start.elapsed();
+ if elapsed > MIN_STAGE_DURATION_TO_REPORT {
+ let target = self.metric_names.target;
+ vlog::debug!("{target} execution stage {stage} took {elapsed:?} with count {count:?}");
+ }
+
+ let (l1_batch_labels, miniblock_labels);
+ let labels: &[_] = if let Some(is_fictive) = self.is_fictive {
+ let is_fictive_label = if is_fictive { "true" } else { "false" };
+ miniblock_labels = [("is_fictive", is_fictive_label), ("stage", stage)];
+ &miniblock_labels
+ } else {
+ l1_batch_labels = [("stage", stage)];
+ &l1_batch_labels
+ };
+ metrics::histogram!(self.metric_names.stage_latency, elapsed, labels);
+
+ if let Some(count) = count {
+ metrics::histogram!(self.metric_names.entity_count, count as f64, labels);
+ if count > 0 {
+ metrics::histogram!(
+ self.metric_names.latency_per_unit,
+ elapsed.div_f64(count as f64),
+ labels
+ );
+ }
+ }
+ self.stage_start = Instant::now();
+ }
}

-// Seal miniblock with the given number.
-//
-// If `is_fictive` flag is set to true, then it is assumed that we should seal a fictive miniblock with no transactions
-// in it. It is needed because there might be some storage logs/events that are created after the last processed tx in
-// l1 batch: after the last transaction is processed, bootloader enters the "tip" phase in which it can still generate
-// events (e.g. one for sending fees to the operator).
-pub(crate) fn seal_miniblock_impl(
- current_miniblock_number: MiniblockNumber,
- current_l1_batch_number: L1BatchNumber,
- statistics: &mut StateKeeperStats,
- storage: &mut StorageProcessor<'_>,
- updates_manager: &UpdatesManager,
- is_fictive: bool,
-) {
- miniblock_assertions(updates_manager, is_fictive);
-
- let started_at = Instant::now();
- let mut stage_started_at: Instant = Instant::now();
-
- let (l1_tx_count, l2_tx_count) =
- extractors::l1_l2_tx_count(&updates_manager.miniblock.executed_transactions);
- vlog::info!(
- "sealing miniblock {} (l1 batch {}) with {} ({} l2 + {} l1) txs, {} events, (writes, reads): {:?}",
+impl UpdatesManager {
+ /// Persists an L1 batch in the storage.
+ /// This action includes a creation of an empty "fictive" miniblock that contains
+ /// the events generated during the bootloader "tip phase".
+ pub(crate) async fn seal_l1_batch(
+ mut self,
+ storage: &mut StorageProcessor<'_>,
+ current_miniblock_number: MiniblockNumber,
+ current_l1_batch_number: L1BatchNumber,
+ block_result: VmBlockResult,
+ block_context: DerivedBlockContext,
+ l2_erc20_bridge_addr: Address,
+ ) {
+ let started_at = Instant::now();
+ let mut progress = SealProgress::for_l1_batch();
+ let mut transaction = storage.start_transaction().await;
+
+ // The vm execution was paused right after the last transaction was executed.
+ // There is some post-processing work that the VM needs to do before the block is fully processed.
+ let VmBlockResult { + full_result, + block_tip_result, + } = block_result; + assert!( + full_result.revert_reason.is_none(), + "VM must not revert when finalizing block. Revert reason: {:?}", + full_result.revert_reason + ); + progress.end_stage("vm_finalization", None); + + self.extend_from_fictive_transaction(block_tip_result.logs); + // Seal fictive miniblock with last events and storage logs. + let miniblock_command = self.seal_miniblock_command( + current_l1_batch_number, current_miniblock_number, + l2_erc20_bridge_addr, + ); + miniblock_command.seal_inner(&mut transaction, true).await; + progress.end_stage("fictive_miniblock", None); + + let (_, deduped_log_queries) = sort_storage_access_queries( + full_result + .storage_log_queries + .iter() + .map(|log| &log.log_query), + ); + progress.end_stage("log_deduplication", Some(deduped_log_queries.len())); + + let (l1_tx_count, l2_tx_count) = l1_l2_tx_count(&self.l1_batch.executed_transactions); + let (writes_count, reads_count) = + storage_log_query_write_read_counts(&full_result.storage_log_queries); + let (dedup_writes_count, dedup_reads_count) = + log_query_write_read_counts(deduped_log_queries.iter()); + vlog::info!( + "Sealing L1 batch {current_l1_batch_number} with {total_tx_count} \ + ({l2_tx_count} L2 + {l1_tx_count} L1) txs, {l2_to_l1_log_count} l2_l1_logs, \ + {event_count} events, {reads_count} reads ({dedup_reads_count} deduped), \ + {writes_count} writes ({dedup_writes_count} deduped)", + total_tx_count = l1_tx_count + l2_tx_count, + l2_to_l1_log_count = full_result.l2_to_l1_logs.len(), + event_count = full_result.events.len() + ); + + let (prev_hash, prev_timestamp) = + extractors::wait_for_prev_l1_batch_params(&mut transaction, current_l1_batch_number) + .await; + let timestamp = block_context.context.block_timestamp; + assert!( + prev_timestamp < timestamp, + "Cannot seal L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ + meaning that L1 batch will be rejected by the bootloader", current_l1_batch_number, - l1_tx_count + l2_tx_count, - l2_tx_count, - l1_tx_count, - updates_manager.miniblock.events.len(), - extractors::storage_log_query_write_read_counts(&updates_manager.miniblock.storage_logs), + extractors::display_timestamp(prev_timestamp), + extractors::display_timestamp(timestamp) ); + let block_context_properties = BlockContextMode::NewBlock(block_context, prev_hash); + + let l1_batch = L1BatchHeader { + number: current_l1_batch_number, + is_finished: true, + timestamp, + fee_account_address: block_context.context.operator_address, + priority_ops_onchain_data: self.l1_batch.priority_ops_onchain_data.clone(), + l1_tx_count: l1_tx_count as u16, + l2_tx_count: l2_tx_count as u16, + l2_to_l1_logs: full_result.l2_to_l1_logs, + l2_to_l1_messages: extract_long_l2_to_l1_messages(&full_result.events), + bloom: Default::default(), + initial_bootloader_contents: Self::initial_bootloader_memory( + &self.l1_batch, + block_context_properties, + ), + used_contract_hashes: full_result.used_contract_hashes, + base_fee_per_gas: block_context.base_fee, + l1_gas_price: self.l1_gas_price(), + l2_fair_gas_price: self.fair_l2_gas_price(), + base_system_contracts_hashes: self.base_system_contract_hashes(), + }; - let mut transaction = storage.start_transaction_blocking(); - let miniblock_header = MiniblockHeader { - number: current_miniblock_number, - timestamp: updates_manager.miniblock.timestamp, - hash: miniblock_hash(current_miniblock_number), - l1_tx_count: l1_tx_count as u16, - l2_tx_count: 
l2_tx_count as u16, - base_fee_per_gas: updates_manager.base_fee_per_gas(), - l1_gas_price: updates_manager.l1_gas_price(), - l2_fair_gas_price: updates_manager.fair_l2_gas_price(), - base_system_contracts_hashes: updates_manager.base_system_contract_hashes(), - }; + transaction + .blocks_dal() + .insert_l1_batch(&l1_batch, self.l1_batch.l1_gas_count) + .await; + progress.end_stage("insert_l1_batch_header", None); - transaction.blocks_dal().insert_miniblock(miniblock_header); - track_miniblock_execution_stage( - "insert_miniblock_header", - &mut stage_started_at, - None, - is_fictive, - ); - - transaction - .transactions_dal() - .mark_txs_as_executed_in_miniblock( - current_miniblock_number, - &updates_manager.miniblock.executed_transactions, - updates_manager.base_fee_per_gas().into(), + transaction + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(current_l1_batch_number) + .await; + progress.end_stage("set_l1_batch_number_for_miniblocks", None); + + transaction + .transactions_dal() + .mark_txs_as_executed_in_l1_batch( + current_l1_batch_number, + &self.l1_batch.executed_transactions, + ) + .await; + progress.end_stage("mark_txs_as_executed_in_l1_batch", None); + + let (deduplicated_writes, protective_reads): (Vec<_>, Vec<_>) = deduped_log_queries + .into_iter() + .partition(|log_query| log_query.rw_flag); + transaction + .storage_logs_dedup_dal() + .insert_protective_reads(current_l1_batch_number, &protective_reads) + .await; + progress.end_stage("insert_protective_reads", Some(protective_reads.len())); + + transaction + .storage_logs_dedup_dal() + .insert_initial_writes(current_l1_batch_number, &deduplicated_writes) + .await; + progress.end_stage("insert_initial_writes", Some(deduplicated_writes.len())); + + transaction.commit().await; + progress.end_stage("commit_l1_batch", None); + + let writes_metrics = self.storage_writes_deduplicator.metrics(); + // Sanity check metrics. 
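+ // The write counts tracked on the fly by `storage_writes_deduplicator` must agree
+ // with the post-hoc deduplication performed by `sort_storage_access_queries` above.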
+ assert_eq!( + deduplicated_writes.len(), + writes_metrics.initial_storage_writes + writes_metrics.repeated_storage_writes, + "Results of in-flight and common deduplications are mismatched" + ); + + self.report_l1_batch_metrics( + started_at, + current_l1_batch_number, + timestamp, + &writes_metrics, + ); + } + + fn initial_bootloader_memory( + updates_accumulator: &L1BatchUpdates, + block_context: BlockContextMode, + ) -> Vec<(usize, U256)> { + let transactions_data = updates_accumulator + .executed_transactions + .iter() + .map(|res| res.transaction.clone().into()) + .collect(); + + let refunds = updates_accumulator + .executed_transactions + .iter() + .map(|res| res.operator_suggested_refund) + .collect(); + + let compressed_bytecodes = updates_accumulator + .executed_transactions + .iter() + .map(|res| res.compressed_bytecodes.clone()) + .collect(); + + get_bootloader_memory( + transactions_data, + refunds, + compressed_bytecodes, + TxExecutionMode::VerifyExecute, + block_context, + ) + } + + fn report_l1_batch_metrics( + &self, + started_at: Instant, + current_l1_batch_number: L1BatchNumber, + block_timestamp: u64, + writes_metrics: &DeduplicatedWritesMetrics, + ) { + metrics::histogram!( + "server.state_keeper.l1_batch.initial_writes", + writes_metrics.initial_storage_writes as f64 + ); + metrics::histogram!( + "server.state_keeper.l1_batch.repeated_writes", + writes_metrics.repeated_storage_writes as f64 + ); + + metrics::histogram!( + "server.state_keeper.l1_batch.transactions_in_l1_batch", + self.l1_batch.executed_transactions.len() as f64 ); - track_miniblock_execution_stage( - "mark_transactions_in_miniblock", - &mut stage_started_at, - Some(updates_manager.miniblock.executed_transactions.len()), - is_fictive, - ); - - let storage_logs = extractors::log_queries_to_storage_logs( - &updates_manager.miniblock.storage_logs, - updates_manager, - is_fictive, - ); - let write_logs = extractors::write_logs_from_storage_logs(storage_logs); - let write_logs_len = write_logs.iter().flat_map(|(_, logs)| logs).count(); - - transaction - .storage_logs_dal() - .insert_storage_logs(current_miniblock_number, &write_logs); - track_miniblock_execution_stage( - "insert_storage_logs", - &mut stage_started_at, - Some(write_logs_len), - is_fictive, - ); - - let unique_updates = transaction.storage_dal().apply_storage_logs(&write_logs); - track_miniblock_execution_stage( - "apply_storage_logs", - &mut stage_started_at, - Some(write_logs_len), - is_fictive, - ); - - let new_factory_deps = updates_manager.miniblock.new_factory_deps.clone(); - let new_factory_deps_len = new_factory_deps.iter().flat_map(|(_, deps)| deps).count(); - if !new_factory_deps.is_empty() { + let l1_batch_latency = + ((millis_since_epoch() - block_timestamp as u128 * 1_000) as f64) / 1_000.0; + metrics::histogram!( + "server.l1_batch.latency", + l1_batch_latency, + "stage" => "sealed" + ); + + metrics::histogram!( + "server.state_keeper.l1_batch.sealed_time", + started_at.elapsed(), + ); + vlog::debug!( + "sealed l1 batch {current_l1_batch_number} in {:?}", + started_at.elapsed() + ); + } +} + +impl MiniblockSealCommand { + pub async fn seal(&self, storage: &mut StorageProcessor<'_>) { + self.seal_inner(storage, false).await; + } + + /// Seals a miniblock with the given number. + /// + /// If `is_fictive` flag is set to true, then it is assumed that we should seal a fictive miniblock + /// with no transactions in it. 
It is needed because there might be some storage logs / events + /// that are created after the last processed tx in the L1 batch: after the last transaction is processed, + /// the bootloader enters the "tip" phase in which it can still generate events (e.g., + /// one for sending fees to the operator). + /// + /// `l2_erc20_bridge_addr` is required to extract the information on newly added tokens. + async fn seal_inner(&self, storage: &mut StorageProcessor<'_>, is_fictive: bool) { + self.assert_valid_miniblock(is_fictive); + + let l1_batch_number = self.l1_batch_number; + let miniblock_number = self.miniblock_number; + let started_at = Instant::now(); + let mut progress = SealProgress::for_miniblock(is_fictive); + + let (l1_tx_count, l2_tx_count) = l1_l2_tx_count(&self.miniblock.executed_transactions); + let (writes_count, reads_count) = + storage_log_query_write_read_counts(&self.miniblock.storage_logs); + vlog::info!( + "Sealing miniblock {miniblock_number} (L1 batch {l1_batch_number}) \ + with {total_tx_count} ({l2_tx_count} L2 + {l1_tx_count} L1) txs, {event_count} events, \ + {reads_count} reads, {writes_count} writes", + total_tx_count = l1_tx_count + l2_tx_count, + event_count = self.miniblock.events.len() + ); + + let mut transaction = storage.start_transaction().await; + let miniblock_header = MiniblockHeader { + number: miniblock_number, + timestamp: self.miniblock.timestamp, + hash: miniblock_hash(miniblock_number), + l1_tx_count: l1_tx_count as u16, + l2_tx_count: l2_tx_count as u16, + base_fee_per_gas: self.base_fee_per_gas, + l1_gas_price: self.l1_gas_price, + l2_fair_gas_price: self.fair_l2_gas_price, + base_system_contracts_hashes: self.base_system_contracts_hashes, + }; + + transaction + .blocks_dal() + .insert_miniblock(&miniblock_header) + .await; + progress.end_stage("insert_miniblock_header", None); + + transaction + .transactions_dal() + .mark_txs_as_executed_in_miniblock( + miniblock_number, + &self.miniblock.executed_transactions, + self.base_fee_per_gas.into(), + ) + .await; + progress.end_stage( + "mark_transactions_in_miniblock", + Some(self.miniblock.executed_transactions.len()), + ); + + let write_logs = self.extract_write_logs(is_fictive); + let write_log_count = write_logs.iter().map(|(_, logs)| logs.len()).sum(); + transaction + .storage_logs_dal() + .insert_storage_logs(miniblock_number, &write_logs) + .await; + progress.end_stage("insert_storage_logs", Some(write_log_count)); + + let unique_updates = transaction .storage_dal() - .insert_factory_deps(current_miniblock_number, new_factory_deps); + .apply_storage_logs(&write_logs) + .await; + progress.end_stage("apply_storage_logs", Some(write_log_count)); + + let new_factory_deps = &self.miniblock.new_factory_deps; + let new_factory_deps_count = new_factory_deps.len(); + if !new_factory_deps.is_empty() { + transaction + .storage_dal() + .insert_factory_deps(miniblock_number, new_factory_deps) + .await; + } + progress.end_stage("insert_factory_deps", Some(new_factory_deps_count)); + + // Factory deps should be inserted before using `count_deployed_contracts`. 
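+ // A contract counts as deployed when a non-zero bytecode hash has been written to its
+ // entry under `ACCOUNT_CODE_STORAGE_ADDRESS` (see `count_deployed_contracts` below).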
+ let deployed_contract_count = Self::count_deployed_contracts(&unique_updates); + progress.end_stage("extract_contracts_deployed", Some(deployed_contract_count)); + + let added_tokens = extract_added_tokens(self.l2_erc20_bridge_addr, &self.miniblock.events); + progress.end_stage("extract_added_tokens", Some(added_tokens.len())); + let added_tokens_len = added_tokens.len(); + if !added_tokens.is_empty() { + transaction.tokens_dal().add_tokens(added_tokens).await; + } + progress.end_stage("insert_tokens", Some(added_tokens_len)); + + let miniblock_events = self.extract_events(is_fictive); + let miniblock_event_count = miniblock_events + .iter() + .map(|(_, events)| events.len()) + .sum(); + progress.end_stage("extract_events", Some(miniblock_event_count)); + transaction + .events_dal() + .save_events(miniblock_number, &miniblock_events) + .await; + progress.end_stage("insert_events", Some(miniblock_event_count)); + + let l2_to_l1_logs = self.extract_l2_to_l1_logs(is_fictive); + let l2_to_l1_log_count = l2_to_l1_logs + .iter() + .map(|(_, l2_to_l1_logs)| l2_to_l1_logs.len()) + .sum(); + progress.end_stage("extract_l2_to_l1_logs", Some(l2_to_l1_log_count)); + transaction + .events_dal() + .save_l2_to_l1_logs(miniblock_number, &l2_to_l1_logs) + .await; + progress.end_stage("insert_l2_to_l1_logs", Some(l2_to_l1_log_count)); + + transaction.commit().await; + progress.end_stage("commit_miniblock", None); + self.report_miniblock_metrics(started_at); } - track_miniblock_execution_stage( - "insert_factory_deps", - &mut stage_started_at, - Some(new_factory_deps_len), - is_fictive, - ); - - // Factory deps should be inserted before using `contracts_deployed_this_miniblock`. - let deployed_contracts = - extractors::contracts_deployed_this_miniblock(unique_updates, &mut transaction); - if !deployed_contracts.is_empty() { - statistics.num_contracts += deployed_contracts.len() as u64; + + /// Performs several sanity checks to make sure that the miniblock is valid. 
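+ ///
+ /// A small illustration of the accepted tx index range (`first_tx_index` is a field
+ /// on this command; the counts here are made up):
+ ///
+ /// ```
+ /// let (first_tx_index, executed_tx_count) = (5, 3); // a fictive miniblock has 0 executed txs
+ /// let next_tx_index = first_tx_index + executed_tx_count;
+ /// assert_eq!(first_tx_index..next_tx_index, 5..8); // valid tx indexes for a regular miniblock
+ /// ```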
+ fn assert_valid_miniblock(&self, is_fictive: bool) {
+ assert_eq!(self.miniblock.executed_transactions.is_empty(), is_fictive);
+
+ let first_tx_index = self.first_tx_index;
+ let next_tx_index = first_tx_index + self.miniblock.executed_transactions.len();
+ let tx_index_range = if is_fictive {
+ next_tx_index..(next_tx_index + 1)
+ } else {
+ first_tx_index..next_tx_index
+ };
+
+ for event in &self.miniblock.events {
+ let tx_index = event.location.1 as usize;
+ assert!(tx_index_range.contains(&tx_index));
+ }
+ for storage_log in &self.miniblock.storage_logs {
+ let tx_index = storage_log.log_query.tx_number_in_block as usize;
+ assert!(tx_index_range.contains(&tx_index));
+ }
}

- let deployed_contracts_len = deployed_contracts
- .iter()
- .flat_map(|(_, contracts)| contracts)
- .count();
- track_miniblock_execution_stage(
- "extract_contracts_deployed",
- &mut stage_started_at,
- Some(deployed_contracts_len),
- is_fictive,
- );
-
- let added_tokens = extract_added_tokens(&updates_manager.miniblock.events);
- track_miniblock_execution_stage(
- "extract_added_tokens",
- &mut stage_started_at,
- Some(added_tokens.len()),
- is_fictive,
- );
- let added_tokens_len = added_tokens.len();
- if !added_tokens.is_empty() {
- transaction.tokens_dal().add_tokens(added_tokens);
+
+ fn extract_write_logs(&self, is_fictive: bool) -> Vec<(H256, Vec<StorageLog>)> {
+ let logs = self.miniblock.storage_logs.iter();
+ let grouped_logs = logs.group_by(|log| log.log_query.tx_number_in_block);
+
+ let grouped_logs = grouped_logs.into_iter().map(|(tx_index, logs)| {
+ let tx_hash = if is_fictive {
+ assert_eq!(tx_index as usize, self.first_tx_index);
+ H256::zero()
+ } else {
+ self.transaction(tx_index as usize).hash()
+ };
+ let logs = logs.filter_map(|log| {
+ log.log_query
+ .rw_flag
+ .then(|| StorageLog::from_log_query(log))
+ });
+ (tx_hash, logs.collect())
+ });
+ grouped_logs.collect()
}

- track_miniblock_execution_stage(
- "insert_tokens",
- &mut stage_started_at,
- Some(added_tokens_len),
- is_fictive,
- );
-
- let events_this_miniblock = extractors::extract_events_this_block(
- &updates_manager.miniblock.events,
- updates_manager,
- is_fictive,
- );
-
- let events_this_miniblock_len = events_this_miniblock
- .iter()
- .flat_map(|(_, events)| events.iter())
- .count();
-
- track_miniblock_execution_stage(
- "extract_events",
- &mut stage_started_at,
- Some(events_this_miniblock_len),
- is_fictive,
- );
- transaction
- .events_dal()
- .save_events(current_miniblock_number, events_this_miniblock);
- track_miniblock_execution_stage(
- "insert_events",
- &mut stage_started_at,
- Some(events_this_miniblock_len),
- is_fictive,
- );
-
- let l2_to_l1_logs_this_miniblock = extractors::extract_l2_to_l1_logs_this_block(
- &updates_manager.miniblock.l2_to_l1_logs,
- updates_manager,
- is_fictive,
- );
-
- let l2_to_l1_logs_this_miniblock_len = l2_to_l1_logs_this_miniblock
- .iter()
- .flat_map(|(_, l2_to_l1_logs)| l2_to_l1_logs.iter())
- .count();
-
- track_miniblock_execution_stage(
- "extract_l2_to_l1_logs",
- &mut stage_started_at,
- Some(l2_to_l1_logs_this_miniblock_len),
- is_fictive,
- );
- transaction
- .events_dal()
- .save_l2_to_l1_logs(current_miniblock_number, l2_to_l1_logs_this_miniblock);
- track_miniblock_execution_stage(
- "insert_l2_to_l1_logs",
- &mut stage_started_at,
- Some(l2_to_l1_logs_this_miniblock_len),
- is_fictive,
- );
-
- transaction.commit_blocking();
- track_miniblock_execution_stage("commit_miniblock", &mut stage_started_at, None, is_fictive);
-
- metrics::histogram!(
- "server.state_keeper.miniblock.transactions_in_miniblock",
- updates_manager.miniblock.executed_transactions.len() as f64
- );
- metrics::histogram!(
- "server.miniblock.latency",
- ((millis_since_epoch() - updates_manager.miniblock.timestamp as u128 * 1000) as f64) / 1000f64,
- "stage" => "sealed"
- );
- metrics::histogram!(
- "server.state_keeper.miniblock.sealed_time",
- started_at.elapsed(),
- );
- metrics::gauge!(
- "server.miniblock.number",
- current_miniblock_number.0 as f64,
- "stage" => "sealed"
- );
-
- metrics::gauge!(
- "server.state_keeper.storage_contracts_size",
- statistics.num_contracts as f64
- );
- vlog::debug!(
- "sealed miniblock {} in {:?}",
- current_miniblock_number,
- started_at.elapsed()
- );
-
- track_miniblock_execution_stage(
- "apply_miniblock_updates_to_l1_batch_updates_accumulator",
- &mut stage_started_at,
- None,
- is_fictive,
- );
-}

-/// Performs several sanity checks to make sure that the miniblock is valid.
-fn miniblock_assertions(updates_manager: &UpdatesManager, is_fictive: bool) {
- if is_fictive {
- assert!(updates_manager.miniblock.executed_transactions.is_empty());
- } else {
- assert!(!updates_manager.miniblock.executed_transactions.is_empty());
+ fn transaction(&self, index: usize) -> &Transaction {
+ let tx_result = &self.miniblock.executed_transactions[index - self.first_tx_index];
+ &tx_result.transaction
}

- let first_tx_index_in_miniblock = updates_manager.l1_batch.executed_transactions.len();
- let next_tx_index = updates_manager.pending_executed_transactions_len();
- let miniblock_tx_index_range = if is_fictive {
- next_tx_index..(next_tx_index + 1)
- } else {
- first_tx_index_in_miniblock..next_tx_index
- };
+ fn count_deployed_contracts(
+ unique_updates: &HashMap<StorageKey, (H256, StorageValue)>,
+ ) -> usize {
+ let mut count = 0;
+ for (key, (_, value)) in unique_updates {
+ if *key.account().address() == ACCOUNT_CODE_STORAGE_ADDRESS {
+ let bytecode_hash = *value;
+ if bytecode_hash != H256::zero() {
+ count += 1;
+ }
+ }
+ }
+ count
+ }

- for event in updates_manager.miniblock.events.iter() {
- assert!(miniblock_tx_index_range.contains(&(event.location.1 as usize)))
+ fn extract_events(&self, is_fictive: bool) -> Vec<(IncludedTxLocation, Vec<&VmEvent>)> {
+ self.group_by_tx_location(&self.miniblock.events, is_fictive, |event| event.location.1)
}
- for storage_log in updates_manager.miniblock.storage_logs.iter() {
- assert!(
- miniblock_tx_index_range.contains(&(storage_log.log_query.tx_number_in_block as usize))
- )
+
+ fn group_by_tx_location<'a, T>(
+ &'a self,
+ entries: &'a [T],
+ is_fictive: bool,
+ tx_location: impl Fn(&T) -> u32,
+ ) -> Vec<(IncludedTxLocation, Vec<&'a T>)> {
+ let grouped_entries = entries.iter().group_by(|&entry| tx_location(entry));
+ let grouped_entries = grouped_entries.into_iter().map(|(tx_index, entries)| {
+ let (tx_hash, tx_initiator_address) = if is_fictive {
+ assert_eq!(tx_index as usize, self.first_tx_index);
+ (H256::zero(), Address::zero())
+ } else {
+ let tx = self.transaction(tx_index as usize);
+ (tx.hash(), tx.initiator_account())
+ };
+
+ let location = IncludedTxLocation {
+ tx_hash,
+ tx_index_in_miniblock: tx_index - self.first_tx_index as u32,
+ tx_initiator_address,
+ };
+ (location, entries.collect())
+ });
+ grouped_entries.collect()
}
-}

-fn track_l1_batch_execution_stage(
- stage: &'static str,
- stage_started_at: &mut Instant,
- count: Option<usize>,
-) {
- metrics::histogram!(
- "server.state_keeper.l1_batch.sealed_time_stage",
- stage_started_at.elapsed(),
- "stage" => stage
- );
- if let Some(count) = count {
+ fn extract_l2_to_l1_logs(
+ &self,
+ is_fictive: bool,
+ ) -> Vec<(IncludedTxLocation, Vec<&L2ToL1Log>)> {
+ self.group_by_tx_location(&self.miniblock.l2_to_l1_logs, is_fictive, |log| {
+ u32::from(log.tx_number_in_block)
+ })
+ }
+
+ fn report_miniblock_metrics(&self, started_at: Instant) {
+ let miniblock_number = self.miniblock_number;
+
 metrics::histogram!(
- "server.state_keeper.l1_batch.sealed_entity_count",
- count as f64,
- "stage" => stage
+ "server.state_keeper.miniblock.transactions_in_miniblock",
+ self.miniblock.executed_transactions.len() as f64
 );
+ let miniblock_latency =
+ ((millis_since_epoch() - self.miniblock.timestamp as u128 * 1_000) as f64) / 1_000.0;
 metrics::histogram!(
- "server.state_keeper.l1_batch.sealed_entity_per_unit",
- stage_started_at.elapsed().div_f64(count as f64),
- "stage" => stage
+ "server.miniblock.latency",
+ miniblock_latency,
+ "stage" => "sealed"
+ );
+ metrics::histogram!(
+ "server.state_keeper.miniblock.sealed_time",
+ started_at.elapsed(),
+ );
+ metrics::gauge!(
+ "server.miniblock.number",
+ miniblock_number.0 as f64,
+ "stage" => "sealed"
 );
- }
- *stage_started_at = Instant::now();
-}

-fn track_miniblock_execution_stage(
- stage: &'static str,
- stage_started_at: &mut Instant,
- count: Option<usize>,
- is_fictive: bool,
-) {
- if stage_started_at.elapsed() > Duration::from_millis(10) {
 vlog::debug!(
- "miniblock execution stage {} took {:?} with count {:?}",
- stage,
- stage_started_at.elapsed(),
- count
+ "sealed miniblock {miniblock_number} in {:?}",
+ started_at.elapsed()
 );
 }
- metrics::histogram!(
- "server.state_keeper.miniblock.sealed_time_stage",
- stage_started_at.elapsed(),
- "stage" => stage,
- "is_fictive" => is_fictive.to_string(),
- );
- if let Some(count) = count {
- metrics::histogram!(
- "server.state_keeper.miniblock.sealed_entity_count",
- count as f64,
- "stage" => stage,
- "is_fictive" => is_fictive.to_string(),
- );
- if count > 0 {
- metrics::histogram!(
- "server.state_keeper.miniblock.sealed_entity_per_unit",
- stage_started_at.elapsed().div_f64(count as f64),
- "stage" => stage,
- "is_fictive" => is_fictive.to_string(),
- );
+}
+
+fn l1_l2_tx_count(executed_transactions: &[TransactionExecutionResult]) -> (usize, usize) {
+ let mut l1_tx_count = 0;
+ let mut l2_tx_count = 0;
+
+ for tx in executed_transactions {
+ if matches!(tx.transaction.common_data, ExecuteTransactionCommon::L1(_)) {
+ l1_tx_count += 1;
+ } else {
+ l2_tx_count += 1;
+ }
+ }
+ (l1_tx_count, l2_tx_count)
+}
+
+fn log_query_write_read_counts<'a>(logs: impl Iterator<Item = &'a LogQuery>) -> (usize, usize) {
+ let mut reads_count = 0;
+ let mut writes_count = 0;
+
+ for log in logs {
+ if log.rw_flag {
+ writes_count += 1;
+ } else {
+ reads_count += 1;
 }
 }
- *stage_started_at = Instant::now();
+ (writes_count, reads_count)
+}
+
+fn storage_log_query_write_read_counts(logs: &[StorageLogQuery]) -> (usize, usize) {
+ log_query_write_read_counts(logs.iter().map(|log| &log.log_query))
}
diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs
index 98a689a22991..ee9b1ac14b51 100644
--- a/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs
+++ b/core/bin/zksync_core/src/state_keeper/io/tests/mod.rs
@@ -1,27 +1,40 @@
+use futures::FutureExt;
+
 use std::time::Duration;

 use db_test_macro::db_test;
-use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata;
+use vm::vm_with_bootloader::{derive_base_fee_and_gas_per_pubdata, BlockContextMode};
+use zksync_contracts::BaseSystemContractsHashes;
use zksync_dal::ConnectionPool;
use zksync_mempool::L2TxFilter;
+use zksync_types::{
+ block::BlockGasCount, tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber,
+ MiniblockNumber, StorageKey, VmEvent, H256, U256,
+};
+use zksync_utils::time::millis_since_epoch;

-use self::tester::Tester;
-use crate::state_keeper::{io::StateKeeperIO, mempool_actor::l2_tx_filter};
+use crate::state_keeper::{
+ io::{MiniblockSealer, StateKeeperIO},
+ mempool_actor::l2_tx_filter,
+ tests::{
+ create_block_metadata, create_execution_result, create_transaction, create_updates_manager,
+ default_block_context, default_vm_block_result, Query,
+ },
+ updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager},
+};

 mod tester;

+use self::tester::Tester;
+
/// Ensure that MempoolIO.filter is correctly initialized right after mempool initialization.
#[db_test]
async fn test_filter_initialization(connection_pool: ConnectionPool) {
 let tester = Tester::new();

 // Genesis is needed for proper mempool initialization.
- tester.genesis(&connection_pool);
-
- let (mempool, _) = tester
- .create_test_mempool_io(connection_pool)
- .await
- .unwrap();
+ tester.genesis(&connection_pool).await;
+ let (mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await;

 // Upon initialization, the filter should be set to the default values.
 assert_eq!(mempool.filter(), &L2TxFilter::default());
@@ -32,45 +45,41 @@
async fn test_filter_with_pending_batch(connection_pool: ConnectionPool) {
 let tester = Tester::new();

- tester.genesis(&connection_pool);
+ tester.genesis(&connection_pool).await;

 // Insert a sealed batch so there will be a prev_l1_batch_state_root.
 // These gas values are random and don't matter for filter calculation as there will be a
 // pending batch the filter will be based off of.
- tester.insert_miniblock(&connection_pool, 1, 5, 55, 555);
-
- tester.insert_sealed_batch(&connection_pool, 1);
+ tester
+ .insert_miniblock(&connection_pool, 1, 5, 55, 555)
+ .await;
+ tester.insert_sealed_batch(&connection_pool, 1).await;

 // Inserting a pending miniblock that isn't included in a sealed batch means there is a pending batch.
 // The gas values are randomly chosen but do affect the filter value calculation.
 let (give_l1_gas_price, give_fair_l2_gas_price) = (100, 1000);
- tester.insert_miniblock(
- &connection_pool,
- 2,
- 10,
- give_l1_gas_price,
- give_fair_l2_gas_price,
- );
-
- let (mut mempool, _) = tester
- .create_test_mempool_io(connection_pool)
- .await
- .unwrap();
+ tester
+ .insert_miniblock(
+ &connection_pool,
+ 2,
+ 10,
+ give_l1_gas_price,
+ give_fair_l2_gas_price,
+ )
+ .await;
+ let (mut mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await;

 // Before the mempool knows there is a pending batch, the filter is still set to the default values.
assert_eq!(mempool.filter(), &L2TxFilter::default()); - mempool.load_pending_batch(); - + mempool.load_pending_batch().await; let (want_base_fee, want_gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(give_l1_gas_price, give_fair_l2_gas_price); - let want_filter = L2TxFilter { l1_gas_price: give_l1_gas_price, fee_per_gas: want_base_fee, gas_per_pubdata: want_gas_per_pubdata as u32, }; - assert_eq!(mempool.filter(), &want_filter); } @@ -78,12 +87,14 @@ async fn test_filter_with_pending_batch(connection_pool: ConnectionPool) { #[db_test] async fn test_filter_with_no_pending_batch(connection_pool: ConnectionPool) { let tester = Tester::new(); - tester.genesis(&connection_pool); + tester.genesis(&connection_pool).await; // Insert a sealed batch so there will be a prev_l1_batch_state_root. // These gas values are random and don't matter for filter calculation. - tester.insert_miniblock(&connection_pool, 1, 5, 55, 555); - tester.insert_sealed_batch(&connection_pool, 1); + tester + .insert_miniblock(&connection_pool, 1, 5, 55, 555) + .await; + tester.insert_sealed_batch(&connection_pool, 1).await; // Create a copy of the tx filter that the mempool will use. let want_filter = l2_tx_filter( @@ -92,10 +103,7 @@ async fn test_filter_with_no_pending_batch(connection_pool: ConnectionPool) { ); // Create a mempool without pending batch and ensure that filter is not initialized just yet. - let (mut mempool, mut guard) = tester - .create_test_mempool_io(connection_pool) - .await - .unwrap(); + let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool, 1).await; assert_eq!(mempool.filter(), &L2TxFilter::default()); // Insert a transaction that matches the expected filter. @@ -109,6 +117,327 @@ async fn test_filter_with_no_pending_batch(connection_pool: ConnectionPool) { // should succeed and initialize the filter. mempool .wait_for_new_batch_params(Duration::from_secs(10)) + .await .expect("No batch params in the test mempool"); assert_eq!(mempool.filter(), &want_filter); } + +async fn test_l1_batch_timestamps_are_distinct( + connection_pool: ConnectionPool, + prev_l1_batch_timestamp: u64, +) { + let mut tester = Tester::new(); + tester.genesis(&connection_pool).await; + + tester.set_timestamp(prev_l1_batch_timestamp); + tester + .insert_miniblock(&connection_pool, 1, 5, 55, 555) + .await; + tester.insert_sealed_batch(&connection_pool, 1).await; + + let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool, 1).await; + // Insert a transaction to trigger L1 batch creation. 
+ let tx_filter = l2_tx_filter( + &tester.create_gas_adjuster().await, + tester.fair_l2_gas_price(), + ); + tester.insert_tx(&mut guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata); + + let batch_params = mempool + .wait_for_new_batch_params(Duration::from_secs(10)) + .await + .expect("No batch params in the test mempool"); + assert!(batch_params.context_mode.timestamp() > prev_l1_batch_timestamp); +} + +#[db_test] +async fn l1_batch_timestamp_basics(connection_pool: ConnectionPool) { + let current_timestamp = (millis_since_epoch() / 1_000) as u64; + test_l1_batch_timestamps_are_distinct(connection_pool, current_timestamp).await; +} + +#[db_test] +async fn l1_batch_timestamp_with_clock_skew(connection_pool: ConnectionPool) { + let current_timestamp = (millis_since_epoch() / 1_000) as u64; + test_l1_batch_timestamps_are_distinct(connection_pool, current_timestamp + 2).await; +} + +#[db_test] +async fn processing_storage_logs_when_sealing_miniblock(connection_pool: ConnectionPool) { + let mut miniblock = MiniblockUpdates::new(0); + + let tx = create_transaction(10, 100); + let storage_logs = [ + (U256::from(1), Query::Read(U256::from(0))), + (U256::from(2), Query::InitialWrite(U256::from(1))), + ( + U256::from(3), + Query::RepeatedWrite(U256::from(2), U256::from(3)), + ), + ( + U256::from(2), + Query::RepeatedWrite(U256::from(1), U256::from(4)), + ), + ]; + let execution_result = create_execution_result(0, storage_logs); + miniblock.extend_from_executed_transaction( + tx, + execution_result, + BlockGasCount::default(), + ExecutionMetrics::default(), + vec![], + ); + + let tx = create_transaction(10, 100); + let storage_logs = [ + (U256::from(4), Query::InitialWrite(U256::from(5))), + ( + U256::from(3), + Query::RepeatedWrite(U256::from(3), U256::from(6)), + ), + ]; + let execution_result = create_execution_result(1, storage_logs); + miniblock.extend_from_executed_transaction( + tx, + execution_result, + BlockGasCount::default(), + ExecutionMetrics::default(), + vec![], + ); + + let l1_batch_number = L1BatchNumber(2); + let seal_command = MiniblockSealCommand { + l1_batch_number, + miniblock_number: MiniblockNumber(3), + miniblock, + first_tx_index: 0, + l1_gas_price: 100, + fair_l2_gas_price: 100, + base_fee_per_gas: 10, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + l2_erc20_bridge_addr: Address::default(), + }; + let mut conn = connection_pool.access_storage_tagged("state_keeper").await; + seal_command.seal(&mut conn).await; + + // Manually mark the miniblock as executed so that getting touched slots from it works + conn.blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(l1_batch_number) + .await; + let touched_slots = conn + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number) + .await; + + // Keys that are only read must not be written to `storage_logs`. + let account = AccountTreeId::default(); + let read_key = StorageKey::new(account, H256::from_low_u64_be(1)); + assert!(!touched_slots.contains_key(&read_key)); + + // The storage logs must be inserted and read in the correct order, so that + // `touched_slots` contain the most recent values in the L1 batch. 
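+ // Expected final values: key 2 -> 4 (overwritten within the first tx),
+ // key 3 -> 6 (overwritten by the second tx), and key 4 -> 5.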
+ assert_eq!(touched_slots.len(), 3); + let written_kvs = [(2, 4), (3, 6), (4, 5)]; + for (key, value) in written_kvs { + let key = StorageKey::new(account, H256::from_low_u64_be(key)); + let expected_value = H256::from_low_u64_be(value); + assert_eq!(touched_slots[&key], expected_value); + } +} + +#[db_test] +async fn processing_events_when_sealing_miniblock(pool: ConnectionPool) { + let l1_batch_number = L1BatchNumber(2); + let mut miniblock = MiniblockUpdates::new(0); + + let events = (0_u8..10).map(|i| VmEvent { + location: (l1_batch_number, u32::from(i / 4)), + value: vec![i], + ..VmEvent::default() + }); + let events: Vec<_> = events.collect(); + + for (i, events_chunk) in events.chunks(4).enumerate() { + let tx = create_transaction(10, 100); + let mut execution_result = create_execution_result(i as u16, []); + execution_result.result.logs.events = events_chunk.to_vec(); + miniblock.extend_from_executed_transaction( + tx, + execution_result, + BlockGasCount::default(), + ExecutionMetrics::default(), + vec![], + ); + } + + let miniblock_number = MiniblockNumber(3); + let seal_command = MiniblockSealCommand { + l1_batch_number, + miniblock_number, + miniblock, + first_tx_index: 0, + l1_gas_price: 100, + fair_l2_gas_price: 100, + base_fee_per_gas: 10, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + l2_erc20_bridge_addr: Address::default(), + }; + let mut conn = pool.access_storage_tagged("state_keeper").await; + seal_command.seal(&mut conn).await; + + let logs = conn + .events_web3_dal() + .get_all_logs(miniblock_number - 1) + .await + .unwrap(); + + assert_eq!(logs.len(), 10); + // The event logs should be inserted in the correct order. + for (i, log) in logs.iter().enumerate() { + assert_eq!(log.data.0, [i as u8]); + } +} + +async fn test_miniblock_and_l1_batch_processing( + pool: ConnectionPool, + miniblock_sealer_capacity: usize, +) { + let tester = Tester::new(); + + // Genesis is needed for proper mempool initialization. + tester.genesis(&pool).await; + let mut conn = pool.access_storage_tagged("state_keeper").await; + // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. + let block_metadata = create_block_metadata(0); + conn.blocks_dal() + .save_blocks_metadata(L1BatchNumber(0), &block_metadata, H256::zero()) + .await; + drop(conn); + + let (mut mempool, _) = tester + .create_test_mempool_io(pool.clone(), miniblock_sealer_capacity) + .await; + + let mut block_context = default_block_context(); + block_context.context.block_timestamp = 100; // change timestamp to pass monotonicity check + let block_context_mode = BlockContextMode::NewBlock(block_context, 0.into()); + let mut updates = + UpdatesManager::new(&block_context_mode, BaseSystemContractsHashes::default()); + + let tx = create_transaction(10, 100); + updates.extend_from_executed_transaction( + tx, + create_execution_result(0, []), + vec![], + BlockGasCount::default(), + ExecutionMetrics::default(), + ); + mempool.seal_miniblock(&updates).await; + updates.push_miniblock(1); + + let block_result = default_vm_block_result(); + mempool + .seal_l1_batch(block_result, updates, block_context) + .await; + + // Check that miniblock #1 and L1 batch #1 are persisted. 
+ let mut conn = pool.access_storage_tagged("state_keeper").await; + assert_eq!( + conn.blocks_dal().get_sealed_miniblock_number().await, + MiniblockNumber(2) // + fictive miniblock + ); + let l1_batch_header = conn + .blocks_dal() + .get_block_header(L1BatchNumber(1)) + .await + .unwrap(); + assert_eq!(l1_batch_header.l2_tx_count, 1); + assert!(l1_batch_header.is_finished); +} + +#[db_test] +async fn miniblock_and_l1_batch_processing(pool: ConnectionPool) { + test_miniblock_and_l1_batch_processing(pool, 1).await; +} + +#[db_test] +async fn miniblock_and_l1_batch_processing_with_sync_sealer(pool: ConnectionPool) { + test_miniblock_and_l1_batch_processing(pool, 0).await; +} + +#[db_test] +async fn miniblock_sealer_handle_blocking(pool: ConnectionPool) { + let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 1); + + // The first command should be successfully submitted immediately. + let updates_manager = create_updates_manager(); + let seal_command = updates_manager.seal_miniblock_command( + L1BatchNumber(1), + MiniblockNumber(1), + Address::default(), + ); + sealer_handle.submit(seal_command).await; + + // The second command should lead to blocking + let seal_command = updates_manager.seal_miniblock_command( + L1BatchNumber(1), + MiniblockNumber(2), + Address::default(), + ); + { + let submit_future = sealer_handle.submit(seal_command); + futures::pin_mut!(submit_future); + + assert!((&mut submit_future).now_or_never().is_none()); + // ...until miniblock #1 is processed + let command = sealer.commands_receiver.recv().await.unwrap(); + command.completion_sender.send(()).unwrap_err(); // completion receiver should be dropped + submit_future.await; + } + + { + let wait_future = sealer_handle.wait_for_all_commands(); + futures::pin_mut!(wait_future); + assert!((&mut wait_future).now_or_never().is_none()); + let command = sealer.commands_receiver.recv().await.unwrap(); + command.completion_sender.send(()).unwrap(); + wait_future.await; + } + + // Check that `wait_for_all_commands()` state is reset after use. + sealer_handle.wait_for_all_commands().await; + + let seal_command = updates_manager.seal_miniblock_command( + L1BatchNumber(2), + MiniblockNumber(3), + Address::default(), + ); + sealer_handle.submit(seal_command).await; + let command = sealer.commands_receiver.recv().await.unwrap(); + command.completion_sender.send(()).unwrap(); + sealer_handle.wait_for_all_commands().await; +} + +#[db_test] +async fn miniblock_sealer_handle_parallel_processing(pool: ConnectionPool) { + let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 5); + + // 5 miniblock sealing commands can be submitted without blocking. + for i in 1..=5 { + let updates_manager = create_updates_manager(); + let seal_command = updates_manager.seal_miniblock_command( + L1BatchNumber(1), + MiniblockNumber(i), + Address::default(), + ); + sealer_handle.submit(seal_command).await; + } + + for i in 1..=5 { + let command = sealer.commands_receiver.recv().await.unwrap(); + assert_eq!(command.command.miniblock_number, MiniblockNumber(i)); + command.completion_sender.send(()).ok(); + } + + sealer_handle.wait_for_all_commands().await; +} diff --git a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs index 7841cef8e907..88d5a9401530 100644 --- a/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/io/tests/tester.rs @@ -1,28 +1,27 @@ //! Testing harness for the IO. 
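//!
//! Rough usage (a sketch; see `Tester` below):
//!
//! ```ignore
//! let tester = Tester::new();
//! tester.genesis(&connection_pool).await;
//! let (mut mempool_io, _guard) = tester.create_test_mempool_io(connection_pool, 1).await;
//! ```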
-use crate::genesis::create_genesis_block;
-use crate::l1_gas_price::GasAdjuster;
-use crate::state_keeper::{MempoolGuard, MempoolIO};
-use std::{
- sync::{Arc, Mutex},
- time::Duration,
-};
+use std::{sync::Arc, time::Duration};
+
+use zksync_config::configs::chain::StateKeeperConfig;
 use zksync_config::GasAdjusterConfig;
 use zksync_contracts::BaseSystemContracts;
 use zksync_dal::ConnectionPool;
-use zksync_eth_client::{clients::mock::MockEthereum, types::Error};
-use zksync_mempool::MempoolStore;
-use zksync_types::fee::Fee;
-use zksync_types::l2::L2Tx;
+use zksync_eth_client::clients::mock::MockEthereum;
 use zksync_types::{
 block::{L1BatchHeader, MiniblockHeader},
- Address, L1BatchNumber, MiniblockNumber, PriorityOpId, H256,
+ Address, L1BatchNumber, L2ChainId, MiniblockNumber, PriorityOpId, H256,
+};
+
+use crate::{
+ genesis::create_genesis_block,
+ l1_gas_price::GasAdjuster,
+ state_keeper::{io::MiniblockSealer, tests::create_transaction, MempoolGuard, MempoolIO},
};
-use zksync_types::{L2ChainId, Nonce};

#[derive(Debug)]
pub(super) struct Tester {
 base_system_contracts: BaseSystemContracts,
+ current_timestamp: u64,
}

impl Tester {
@@ -30,6 +29,7 @@ impl Tester {
 let base_system_contracts = BaseSystemContracts::load_from_disk();
 Self {
 base_system_contracts,
+ current_timestamp: 0,
 }
 }

@@ -45,6 +45,7 @@ impl Tester {
 internal_l1_pricing_multiplier: 1.0,
 internal_enforced_l1_gas_price: None,
 poll_period: 10,
+ max_l1_gas_price: None,
 };

 GasAdjuster::new(eth_client, gas_adjuster_config)
@@ -60,41 +61,54 @@ impl Tester {
 pub(super) async fn create_test_mempool_io(
 &self,
 pool: ConnectionPool,
- ) -> Result<(MempoolIO<GasAdjuster<MockEthereum>>, MempoolGuard), Error> {
+ miniblock_sealer_capacity: usize,
+ ) -> (MempoolIO<GasAdjuster<MockEthereum>>, MempoolGuard) {
 let gas_adjuster = Arc::new(self.create_gas_adjuster().await);
+ let mempool = MempoolGuard::new(PriorityOpId(0), 100);
+ let (miniblock_sealer, miniblock_sealer_handle) =
+ MiniblockSealer::new(pool.clone(), miniblock_sealer_capacity);
+ tokio::spawn(miniblock_sealer.run());
+
+ let base_contract_hashes = self.base_system_contracts.hashes();
+ let config = StateKeeperConfig {
+ fair_l2_gas_price: self.fair_l2_gas_price(),
+ bootloader_hash: base_contract_hashes.bootloader,
+ default_aa_hash: base_contract_hashes.default_aa,
+ ..StateKeeperConfig::default()
+ };
+ let l2_erc20_bridge_addr = Address::repeat_byte(0x5a); // Isn't relevant.
+ let io = MempoolIO::new( + mempool.clone(), + miniblock_sealer_handle, + gas_adjuster, + pool, + &config, + Duration::from_secs(1), + l2_erc20_bridge_addr, + ) + .await; + + (io, mempool) + } - let mempool = MempoolGuard(Arc::new(Mutex::new(MempoolStore::new( - PriorityOpId(0), - 100, - )))); - - Ok(( - MempoolIO::new( - mempool.clone(), - pool, - Address::default(), - self.fair_l2_gas_price(), - Duration::from_secs(1), - gas_adjuster, - self.base_system_contracts.hashes(), - ), - mempool, - )) + pub(super) fn set_timestamp(&mut self, timestamp: u64) { + self.current_timestamp = timestamp; } - pub(super) fn genesis(&self, pool: &ConnectionPool) { - let mut storage = pool.access_storage_blocking(); - if storage.blocks_dal().is_genesis_needed() { + pub(super) async fn genesis(&self, pool: &ConnectionPool) { + let mut storage = pool.access_storage_tagged("state_keeper").await; + if storage.blocks_dal().is_genesis_needed().await { create_genesis_block( &mut storage, Address::repeat_byte(0x01), L2ChainId(270), self.base_system_contracts.clone(), - ); + ) + .await; } } - pub(super) fn insert_miniblock( + pub(super) async fn insert_miniblock( &self, pool: &ConnectionPool, number: u32, @@ -102,42 +116,45 @@ impl Tester { l1_gas_price: u64, l2_fair_gas_price: u64, ) { - let mut storage = pool.access_storage_blocking(); - storage.blocks_dal().insert_miniblock(MiniblockHeader { - number: MiniblockNumber(number), - timestamp: 0, - hash: Default::default(), - l1_tx_count: 0, - l2_tx_count: 0, - base_fee_per_gas, - l1_gas_price, - l2_fair_gas_price, - base_system_contracts_hashes: self.base_system_contracts.hashes(), - }); + let mut storage = pool.access_storage_tagged("state_keeper").await; + storage + .blocks_dal() + .insert_miniblock(&MiniblockHeader { + number: MiniblockNumber(number), + timestamp: self.current_timestamp, + hash: H256::default(), + l1_tx_count: 0, + l2_tx_count: 0, + base_fee_per_gas, + l1_gas_price, + l2_fair_gas_price, + base_system_contracts_hashes: self.base_system_contracts.hashes(), + }) + .await; } - pub(super) fn insert_sealed_batch(&self, pool: &ConnectionPool, number: u32) { + pub(super) async fn insert_sealed_batch(&self, pool: &ConnectionPool, number: u32) { let mut batch_header = L1BatchHeader::new( L1BatchNumber(number), - 0, + self.current_timestamp, Address::default(), self.base_system_contracts.hashes(), ); batch_header.is_finished = true; - let mut storage = pool.access_storage_blocking(); - + let mut storage = pool.access_storage_tagged("state_keeper").await; storage .blocks_dal() - .insert_l1_batch(batch_header.clone(), Default::default()); - + .insert_l1_batch(&batch_header, Default::default()) + .await; storage .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(batch_header.number); - + .mark_miniblocks_as_executed_in_l1_batch(batch_header.number) + .await; storage .blocks_dal() - .set_l1_batch_hash(batch_header.number, H256::default()); + .set_l1_batch_hash(batch_header.number, H256::default()) + .await; } pub(super) fn insert_tx( @@ -146,29 +163,7 @@ impl Tester { fee_per_gas: u64, gas_per_pubdata: u32, ) { - let fee = Fee { - gas_limit: 1000u64.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), - }; - let mut tx = L2Tx::new_signed( - Address::random(), - vec![], - Nonce(0), - fee, - Default::default(), - L2ChainId(271), - &H256::repeat_byte(0x11u8), - None, - Default::default(), - ) - .unwrap(); - // Input means all transaction data (NOT calldata, but all tx fields) 
that came from the API.
- // This input will be used for the derivation of the tx hash, so put some random to it to be sure
- // that the transaction hash is unique.
- tx.set_input(H256::random().0.to_vec(), H256::random());
-
- guard.insert(vec![tx.into()], Default::default());
+ let tx = create_transaction(fee_per_gas, gas_per_pubdata);
+ guard.insert(vec![tx], Default::default());
 }
}
diff --git a/core/bin/zksync_core/src/state_keeper/keeper.rs b/core/bin/zksync_core/src/state_keeper/keeper.rs
index c6c2d348037a..adfc37d4a02d 100644
--- a/core/bin/zksync_core/src/state_keeper/keeper.rs
+++ b/core/bin/zksync_core/src/state_keeper/keeper.rs
@@ -1,8 +1,8 @@
-use std::time::{Duration, Instant};
+use tokio::sync::watch;

-use tokio::sync::watch::Receiver;
+use std::time::{Duration, Instant};

-use vm::{transaction_data::TransactionData, TxRevertReason};
+use vm::TxRevertReason;
 use zksync_types::{
 storage_writes_deduplicator::StorageWritesDeduplicator, MiniblockNumber, Transaction,
 };
@@ -10,8 +10,9 @@
 use crate::gas_tracker::gas_count_from_writes;
 use crate::state_keeper::{
 batch_executor::{BatchExecutorHandle, L1BatchExecutorBuilder, TxExecutionResult},
+ extractors,
 io::{L1BatchParams, PendingBatchData, StateKeeperIO},
- seal_criteria::{SealManager, SealResolution},
+ seal_criteria::{SealData, SealManager, SealResolution},
 types::ExecutionMetricsForCriteria,
 updates::UpdatesManager,
 };
@@ -35,7 +36,7 @@ struct Canceled;
/// a sequence of executed miniblocks and batches.
#[derive(Debug)]
pub struct ZkSyncStateKeeper {
- stop_receiver: Receiver<bool>,
+ stop_receiver: watch::Receiver<bool>,
 io: Box<dyn StateKeeperIO>,
 batch_executor_base: Box<dyn L1BatchExecutorBuilder>,
 sealer: SealManager,
}

impl ZkSyncStateKeeper {
 pub fn new(
- stop_receiver: Receiver<bool>,
+ stop_receiver: watch::Receiver<bool>,
 io: Box<dyn StateKeeperIO>,
 batch_executor_base: Box<dyn L1BatchExecutorBuilder>,
 sealer: SealManager,
 ) -> Self {
@@ -56,8 +57,8 @@ impl ZkSyncStateKeeper {
 }
 }

- pub fn run(mut self) {
- match self.run_inner() {
+ pub async fn run(mut self) {
+ match self.run_inner().await {
 Ok(()) => {
 // Normally, state keeper can only exit its routine if the task was cancelled.
 panic!("State keeper exited the main loop")
@@ -69,7 +70,7 @@
 }

 /// Fallible version of `run` routine that allows to easily exit upon cancellation.
- fn run_inner(&mut self) -> Result<(), Canceled> {
+ async fn run_inner(&mut self) -> Result<(), Canceled> {
 vlog::info!(
 "Starting state keeper. Next l1 batch to seal: {}, Next miniblock to seal: {}",
 self.io.current_l1_batch_number(),
@@ -80,7 +81,7 @@
 let PendingBatchData {
 params,
 txs: txs_to_reexecute,
- } = match self.io.load_pending_batch() {
+ } = match self.io.load_pending_batch().await {
 Some(params) => {
 vlog::info!(
 "There exists a pending batch consisting of {} miniblocks, the first one is {}",
@@ -96,50 +97,64 @@
 None => {
 vlog::info!("There is no open pending batch, starting a new empty batch");
 PendingBatchData {
- params: self.wait_for_new_batch_params()?,
+ params: self.wait_for_new_batch_params().await?,
 txs: Vec::new(),
 }
 }
 };

 let mut l1_batch_params = params;
-
 let mut updates_manager = UpdatesManager::new(
 &l1_batch_params.context_mode,
 l1_batch_params.base_system_contracts.hashes(),
 );
- let mut batch_executor = self.batch_executor_base.init_batch(l1_batch_params.clone());
- self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute)?;
+ let mut batch_executor = self
+ .batch_executor_base
+ .init_batch(l1_batch_params.clone())
+ .await;
+ self.restore_state(&batch_executor, &mut updates_manager, txs_to_reexecute)
+ .await?;

+ let mut l1_batch_seal_delta: Option<Instant> = None;
 loop {
 self.check_if_cancelled()?;
 // This function will run until the batch can be sealed.
- self.process_l1_batch(&batch_executor, &mut updates_manager)?;
+ self.process_l1_batch(&batch_executor, &mut updates_manager)
+ .await?;

 // Finish current batch.
 if !updates_manager.miniblock.executed_transactions.is_empty() {
- self.io.seal_miniblock(&updates_manager);
- // We've sealed the miniblock that we had, but we still need to setup the timestamp for the
- // fictive miniblock.
- let fictive_miniblock_timestamp = self.wait_for_new_miniblock_params()?;
- updates_manager.seal_miniblock(fictive_miniblock_timestamp);
+ self.io.seal_miniblock(&updates_manager).await;
+ // We've sealed the miniblock that we had, but we still need to setup the timestamp
+ // for the fictive miniblock.
+ let fictive_miniblock_timestamp = self.wait_for_new_miniblock_params().await?;
+ updates_manager.push_miniblock(fictive_miniblock_timestamp);
 }

- let block_result = batch_executor.finish_batch();
- self.io.seal_l1_batch(
- block_result,
- updates_manager,
- l1_batch_params.context_mode.inner_block_context(),
- );
+ let block_result = batch_executor.finish_batch().await;
+ self.io
+ .seal_l1_batch(
+ block_result,
+ updates_manager,
+ l1_batch_params.context_mode.inner_block_context(),
+ )
+ .await;
+ if let Some(delta) = l1_batch_seal_delta {
+ metrics::histogram!("server.state_keeper.l1_batch.seal_delta", delta.elapsed());
+ }
+ l1_batch_seal_delta = Some(Instant::now());

 // Start the new batch.
- l1_batch_params = self.wait_for_new_batch_params()?;
+ l1_batch_params = self.wait_for_new_batch_params().await?;
 updates_manager = UpdatesManager::new(
 &l1_batch_params.context_mode,
 l1_batch_params.base_system_contracts.hashes(),
 );
- batch_executor = self.batch_executor_base.init_batch(l1_batch_params.clone());
+ batch_executor = self
+ .batch_executor_base
+ .init_batch(l1_batch_params.clone())
+ .await;
 }
 }
@@ -150,9 +165,9 @@
 Ok(())
 }

- fn wait_for_new_batch_params(&mut self) -> Result<L1BatchParams, Canceled> {
+ async fn wait_for_new_batch_params(&mut self) -> Result<L1BatchParams, Canceled> {
 let params = loop {
- if let Some(params) = self.io.wait_for_new_batch_params(POLL_WAIT_DURATION) {
+ if let Some(params) = self.io.wait_for_new_batch_params(POLL_WAIT_DURATION).await {
 break params;
 }
 self.check_if_cancelled()?;
@@ -160,9 +175,13 @@
 Ok(params)
 }

- fn wait_for_new_miniblock_params(&mut self) -> Result<u64, Canceled> {
+ async fn wait_for_new_miniblock_params(&mut self) -> Result<u64, Canceled> {
 let params = loop {
- if let Some(params) = self.io.wait_for_new_miniblock_params(POLL_WAIT_DURATION) {
+ if let Some(params) = self
+ .io
+ .wait_for_new_miniblock_params(POLL_WAIT_DURATION)
+ .await
+ {
 break params;
 }
 self.check_if_cancelled()?;
@@ -175,7 +194,7 @@
 /// batch, we need to restore the state. We must ensure that every transaction is executed successfully.
 ///
 /// Additionally, it initializes the next miniblock timestamp.
- fn restore_state(
+ async fn restore_state(
 &mut self,
 batch_executor: &BatchExecutorHandle,
 updates_manager: &mut UpdatesManager,
@@ -188,8 +207,7 @@
 miniblock_number
 );
 for tx in txs {
- let result = batch_executor.execute_tx(tx.clone());
-
+ let result = batch_executor.execute_tx(tx.clone()).await;
 let TxExecutionResult::Success {
 tx_result,
 tx_metrics,
@@ -210,46 +228,43 @@
 let exec_result_status = tx_result.status;

+ let tx_hash = tx.hash();
+ let initiator_account = tx.initiator_account();
+ let is_l1 = tx.is_l1();
 updates_manager.extend_from_executed_transaction(
- &tx,
+ tx,
 *tx_result,
 compressed_bytecodes,
 tx_l1_gas_this_tx,
 tx_execution_metrics,
 );
 vlog::debug!(
- "finished reexecuting tx {} by {} (is_l1: {}) (#{} in l1 batch {}) \
- (#{} in miniblock {}) status: {:?}. L1 gas spent: {:?}, total in l1 batch: {:?}, \
- tx execution metrics: {:?}, block execution metrics: {:?}",
- tx.hash(),
- tx.initiator_account(),
- tx.is_l1(),
- updates_manager.pending_executed_transactions_len(),
- self.io.current_l1_batch_number().0,
- updates_manager.miniblock.executed_transactions.len(),
- miniblock_number,
- exec_result_status,
- tx_l1_gas_this_tx,
- updates_manager.pending_l1_gas_count(),
- &tx_execution_metrics,
- updates_manager.pending_execution_metrics(),
+ "Finished re-executing tx {tx_hash} by {initiator_account} (is_l1: {is_l1}, \
+ #{idx_in_l1_batch} in L1 batch {l1_batch_number}, #{idx_in_miniblock} in miniblock {miniblock_number}); \
+ status: {exec_result_status:?}.
L1 gas spent: {tx_l1_gas_this_tx:?}, total in L1 batch: {pending_l1_gas:?}, \ + tx execution metrics: {tx_execution_metrics:?}, block execution metrics: {block_execution_metrics:?}", + idx_in_l1_batch = updates_manager.pending_executed_transactions_len(), + l1_batch_number = self.io.current_l1_batch_number().0, + idx_in_miniblock = updates_manager.miniblock.executed_transactions.len(), + pending_l1_gas = updates_manager.pending_l1_gas_count(), + block_execution_metrics = updates_manager.pending_execution_metrics() ); } if idx == miniblocks_count - 1 { // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock. - let new_timestamp = self.wait_for_new_miniblock_params()?; - updates_manager.seal_miniblock(new_timestamp); + let new_timestamp = self.wait_for_new_miniblock_params().await?; + updates_manager.push_miniblock(new_timestamp); } else { // For all the blocks except the last one we pass 0 as a timestamp, since we don't expect it to be used // anywhere. Using an obviously wrong value would make bugs easier to spot. - updates_manager.seal_miniblock(0); + updates_manager.push_miniblock(0); } } Ok(()) } - fn process_l1_batch( + async fn process_l1_batch( &mut self, batch_executor: &BatchExecutorHandle, updates_manager: &mut UpdatesManager, @@ -260,15 +275,33 @@ impl ZkSyncStateKeeper { .sealer .should_seal_l1_batch_unconditionally(updates_manager) { + vlog::debug!( + "L1 batch #{} should be sealed unconditionally as per sealing rules", + self.io.current_l1_batch_number() + ); return Ok(()); } + if self.sealer.should_seal_miniblock(updates_manager) { - self.io.seal_miniblock(updates_manager); - let new_timestamp = self.wait_for_new_miniblock_params()?; - updates_manager.seal_miniblock(new_timestamp); + vlog::debug!( + "Miniblock #{} (L1 batch #{}) should be sealed as per sealing rules", + self.io.current_miniblock_number(), + self.io.current_l1_batch_number() + ); + self.io.seal_miniblock(updates_manager).await; + + let new_timestamp = self.wait_for_new_miniblock_params().await?; + vlog::debug!( + "Initialized new miniblock #{} (L1 batch #{}) with timestamp {}", + self.io.current_miniblock_number(), + self.io.current_l1_batch_number(), + extractors::display_timestamp(new_timestamp) + ); + updates_manager.push_miniblock(new_timestamp); } + let started_waiting = Instant::now(); - let Some(tx) = self.io.wait_for_next_tx(POLL_WAIT_DURATION) else { + let Some(tx) = self.io.wait_for_next_tx(POLL_WAIT_DURATION).await else { metrics::histogram!("server.state_keeper.waiting_for_tx", started_waiting.elapsed()); vlog::trace!("No new transactions. Waiting!"); continue; @@ -278,28 +311,29 @@ impl ZkSyncStateKeeper { started_waiting.elapsed(), ); - let (seal_resolution, exec_result) = - self.process_one_tx(batch_executor, updates_manager, &tx); + let tx_hash = tx.hash(); + let (seal_resolution, exec_result) = self + .process_one_tx(batch_executor, updates_manager, tx.clone()) + .await; match &seal_resolution { SealResolution::NoSeal | SealResolution::IncludeAndSeal => { let TxExecutionResult::Success { - tx_result, - tx_metrics, - compressed_bytecodes, - .. - } = exec_result else { - panic!( - "Tx inclusion seal resolution must be a result of a successful tx execution", - ); - }; + tx_result, + tx_metrics, + compressed_bytecodes, + .. 
+ } = exec_result else { + unreachable!( + "Tx inclusion seal resolution must be a result of a successful tx execution", + ); + }; let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - .. } = tx_metrics; updates_manager.extend_from_executed_transaction( - &tx, + tx, *tx_result, compressed_bytecodes, tx_l1_gas_this_tx, @@ -307,16 +341,21 @@ impl ZkSyncStateKeeper { ); } SealResolution::ExcludeAndSeal => { - batch_executor.rollback_last_tx(); - self.io.rollback(&tx); + batch_executor.rollback_last_tx().await; + self.io.rollback(tx).await; } SealResolution::Unexecutable(reason) => { - batch_executor.rollback_last_tx(); - self.io.reject(&tx, reason); + batch_executor.rollback_last_tx().await; + self.io.reject(&tx, reason).await; } }; if seal_resolution.should_seal() { + vlog::debug!( + "L1 batch #{} should be sealed with resolution {seal_resolution:?} after executing \ + transaction {tx_hash}", + self.io.current_l1_batch_number() + ); return Ok(()); } } @@ -329,22 +368,21 @@ impl ZkSyncStateKeeper { /// 2. Seal manager decided that batch is ready to be sealed. /// Note: this method doesn't mutate `updates_manager` in the end. However, reference should be mutable /// because we use `apply_and_rollback` method of `updates_manager.storage_writes_deduplicator`. - fn process_one_tx( + async fn process_one_tx( &mut self, batch_executor: &BatchExecutorHandle, updates_manager: &mut UpdatesManager, - tx: &Transaction, + tx: Transaction, ) -> (SealResolution, TxExecutionResult) { - let exec_result = batch_executor.execute_tx(tx.clone()); - - match exec_result.clone() { + let exec_result = batch_executor.execute_tx(tx.clone()).await; + let resolution = match &exec_result { TxExecutionResult::BootloaderOutOfGasForTx => { metrics::increment_counter!( "server.tx_aggregation.reason", "criterion" => "bootloader_tx_out_of_gas", "seal_resolution" => "exclude_and_seal", ); - (SealResolution::ExcludeAndSeal, exec_result) + SealResolution::ExcludeAndSeal } TxExecutionResult::BootloaderOutOfGasForBlockTip => { metrics::increment_counter!( @@ -352,7 +390,7 @@ impl ZkSyncStateKeeper { "criterion" => "bootloader_block_tip_failed", "seal_resolution" => "exclude_and_seal", ); - (SealResolution::ExcludeAndSeal, exec_result) + SealResolution::ExcludeAndSeal } TxExecutionResult::RejectedByVm { rejection_reason } => match rejection_reason { TxRevertReason::NotEnoughGasProvided => { @@ -361,12 +399,9 @@ impl ZkSyncStateKeeper { "criterion" => "not_enough_gas_provided_to_start_tx", "seal_resolution" => "exclude_and_seal", ); - (SealResolution::ExcludeAndSeal, exec_result) + SealResolution::ExcludeAndSeal } - _ => ( - SealResolution::Unexecutable(rejection_reason.to_string()), - exec_result, - ), + _ => SealResolution::Unexecutable(rejection_reason.to_string()), }, TxExecutionResult::Success { tx_result, @@ -379,7 +414,7 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: tx_l1_gas_this_tx, execution_metrics: tx_execution_metrics, - } = tx_metrics; + } = *tx_metrics; vlog::trace!( "finished tx {:?} by {:?} (is_l1: {}) (#{} in l1 batch {}) (#{} in miniblock {}) \ @@ -402,48 +437,48 @@ impl ZkSyncStateKeeper { let ExecutionMetricsForCriteria { l1_gas: finish_block_l1_gas, execution_metrics: finish_block_execution_metrics, - .. 
- } = bootloader_dry_run_metrics; - - let tx_data: TransactionData = tx.clone().into(); - let encoding_len = tx_data.into_tokens().len(); - - let logs_to_apply_iter = tx_result - .result - .logs - .storage_logs - .iter() - .chain(&bootloader_dry_run_result.logs.storage_logs); + } = *bootloader_dry_run_metrics; + + let encoding_len = extractors::encoded_transaction_size(tx); + + let logs_to_apply = tx_result.result.logs.storage_logs.iter(); + let logs_to_apply = + logs_to_apply.chain(&bootloader_dry_run_result.logs.storage_logs); let block_writes_metrics = updates_manager .storage_writes_deduplicator - .apply_and_rollback(logs_to_apply_iter.clone()); + .apply_and_rollback(logs_to_apply.clone()); let block_writes_l1_gas = gas_count_from_writes(&block_writes_metrics); let tx_writes_metrics = - StorageWritesDeduplicator::apply_on_empty_state(logs_to_apply_iter); + StorageWritesDeduplicator::apply_on_empty_state(logs_to_apply); let tx_writes_l1_gas = gas_count_from_writes(&tx_writes_metrics); + let tx_gas_excluding_writes = tx_l1_gas_this_tx + finish_block_l1_gas; - let resolution = self.sealer.should_seal_l1_batch( + let tx_data = SealData { + execution_metrics: tx_execution_metrics + finish_block_execution_metrics, + gas_count: tx_gas_excluding_writes + tx_writes_l1_gas, + cumulative_size: encoding_len, + writes_metrics: tx_writes_metrics, + }; + let block_data = SealData { + execution_metrics: tx_data.execution_metrics + + updates_manager.pending_execution_metrics(), + gas_count: tx_gas_excluding_writes + + block_writes_l1_gas + + updates_manager.pending_l1_gas_count(), + cumulative_size: tx_data.cumulative_size + + updates_manager.pending_txs_encoding_size(), + writes_metrics: block_writes_metrics, + }; + self.sealer.should_seal_l1_batch( self.io.current_l1_batch_number().0, - updates_manager.batch_timestamp() as u128 * 1000, + updates_manager.batch_timestamp() as u128 * 1_000, updates_manager.pending_executed_transactions_len() + 1, - updates_manager.pending_execution_metrics() - + tx_execution_metrics - + finish_block_execution_metrics, - tx_execution_metrics + finish_block_execution_metrics, - updates_manager.pending_l1_gas_count() - + tx_l1_gas_this_tx - + finish_block_l1_gas - + block_writes_l1_gas, - tx_l1_gas_this_tx + finish_block_l1_gas + tx_writes_l1_gas, - updates_manager.pending_txs_encoding_size() + encoding_len, - encoding_len, - block_writes_metrics, - tx_writes_metrics, - ); - - (resolution, exec_result) + &block_data, + &tx_data, + ) } - } + }; (resolution, exec_result) } } diff --git a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs index 30d7a1597b2d..30a2335ee1fd 100644 --- a/core/bin/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/bin/zksync_core/src/state_keeper/mempool_actor.rs @@ -5,7 +5,8 @@ use std::time::Duration; use std::time::Instant; use tokio::sync::watch; use vm::vm_with_bootloader::derive_base_fee_and_gas_per_pubdata; -use zksync_config::ZkSyncConfig; +use zksync_config::configs::chain::MempoolConfig; + use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; @@ -39,13 +40,13 @@ impl MempoolFetcher { pub fn new( mempool: MempoolGuard, l1_gas_price_provider: Arc<G>, - config: &ZkSyncConfig, + config: &MempoolConfig, ) -> Self { Self { mempool, l1_gas_price_provider, - sync_interval: config.chain.mempool.sync_interval(), - sync_batch_size: config.chain.mempool.sync_batch_size, + sync_interval: config.sync_interval(), + sync_batch_size: config.sync_batch_size, } }
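Before the next hunk, the fetcher's overall shape is worth spelling out: it periodically copies executable transactions from Postgres into the in-memory mempool and backs off once the queue is drained. A minimal sketch of that polling loop, assuming a hypothetical `sync_batch` stand-in for the `sync_mempool` DAL call and a tokio runtime:

use std::time::Duration;

// Hypothetical stand-in for `transactions_dal().sync_mempool(...)`; returns
// how many transactions were loaded, at most `sync_batch_size`.
async fn sync_batch(sync_batch_size: usize) -> usize {
    0_usize.min(sync_batch_size)
}

#[tokio::main]
async fn main() {
    let sync_interval = Duration::from_millis(10);
    let sync_batch_size = 1_000;
    for _ in 0..3 {
        let loaded = sync_batch(sync_batch_size).await;
        // An incomplete batch means the DB queue is drained for now,
        // so sleep before polling again instead of spinning.
        if loaded < sync_batch_size {
            tokio::time::sleep(sync_interval).await;
        }
    }
}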
@@ -58,14 +59,15 @@ impl MempoolFetcher { stop_receiver: watch::Receiver<bool>, ) { { - let mut storage = pool.access_storage_blocking(); + let mut storage = pool.access_storage_tagged("state_keeper").await; if remove_stuck_txs { let removed_txs = storage .transactions_dal() - .remove_stuck_txs(stuck_tx_timeout); + .remove_stuck_txs(stuck_tx_timeout) + .await; vlog::info!("Number of stuck txs was removed: {}", removed_txs); } - storage.transactions_dal().reset_mempool(); + storage.transactions_dal().reset_mempool().await; } loop { @@ -74,17 +76,20 @@ impl MempoolFetcher { break; } let started_at = Instant::now(); - let mut storage = pool.access_storage_blocking(); + let mut storage = pool.access_storage_tagged("state_keeper").await; let mempool_info = self.mempool.get_mempool_info(); let l2_tx_filter = l2_tx_filter(self.l1_gas_price_provider.as_ref(), fair_l2_gas_price); - let (transactions, nonces) = storage.transactions_dal().sync_mempool( - mempool_info.stashed_accounts, - mempool_info.purged_accounts, - l2_tx_filter.gas_per_pubdata, - l2_tx_filter.fee_per_gas, - self.sync_batch_size, - ); + let (transactions, nonces) = storage + .transactions_dal() + .sync_mempool( + mempool_info.stashed_accounts, + mempool_info.purged_accounts, + l2_tx_filter.gas_per_pubdata, + l2_tx_filter.fee_per_gas, + self.sync_batch_size, + ) + .await; let all_transactions_loaded = transactions.len() < self.sync_batch_size; self.mempool.insert(transactions, nonces); metrics::histogram!("server.state_keeper.mempool_sync", started_at.elapsed()); diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs index bec3aed24e48..e9fb5268f7b9 100644 --- a/core/bin/zksync_core/src/state_keeper/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/mod.rs @@ -1,66 +1,75 @@ -use std::sync::Arc; +use tokio::sync::watch; -use tokio::sync::watch::Receiver; +use std::sync::Arc; -use zksync_config::constants::MAX_TXS_IN_BLOCK; -use zksync_config::ZkSyncConfig; -use zksync_contracts::BaseSystemContractsHashes; +use zksync_config::{ + configs::chain::{MempoolConfig, StateKeeperConfig}, + constants::MAX_TXS_IN_BLOCK, + ContractsConfig, DBConfig, +}; use zksync_dal::ConnectionPool; -use self::batch_executor::MainBatchExecutorBuilder; -use self::io::MempoolIO; -use crate::l1_gas_price::L1GasPriceProvider; -use crate::state_keeper::seal_criteria::SealManager; - -pub use self::{keeper::ZkSyncStateKeeper, types::MempoolGuard}; - -pub mod batch_executor; +mod batch_executor; pub(crate) mod extractors; pub(crate) mod io; mod keeper; -pub(crate) mod mempool_actor; -pub mod seal_criteria; +mod mempool_actor; +pub(crate) mod seal_criteria; #[cfg(test)] mod tests; -pub(crate) mod types; +mod types; pub(crate) mod updates; -pub(crate) fn start_state_keeper<G>( - config: &ZkSyncConfig, - pool: &ConnectionPool, +pub use self::{ + batch_executor::MainBatchExecutorBuilder, keeper::ZkSyncStateKeeper, seal_criteria::SealManager, +}; +pub(crate) use self::{io::MiniblockSealer, mempool_actor::MempoolFetcher, types::MempoolGuard}; + +use self::io::{MempoolIO, MiniblockSealerHandle}; +use crate::l1_gas_price::L1GasPriceProvider; + +#[allow(clippy::too_many_arguments)] +pub(crate) async fn create_state_keeper<G>( + contracts_config: &ContractsConfig, + state_keeper_config: StateKeeperConfig, + db_config: &DBConfig, + mempool_config: &MempoolConfig, + pool: ConnectionPool, mempool: MempoolGuard, l1_gas_price_provider: Arc<G>, - stop_receiver: Receiver<bool>, + miniblock_sealer_handle: MiniblockSealerHandle, + stop_receiver: watch::Receiver<bool>, ) ->
ZkSyncStateKeeper where - G: L1GasPriceProvider + 'static + std::fmt::Debug + Send + Sync, + G: L1GasPriceProvider + 'static + Send + Sync, { assert!( - config.chain.state_keeper.transaction_slots <= MAX_TXS_IN_BLOCK, - "Configured transaction_slots must be lower than the bootloader constant MAX_TXS_IN_BLOCK" + state_keeper_config.transaction_slots <= MAX_TXS_IN_BLOCK, + "Configured transaction_slots ({}) must be lower than the bootloader constant MAX_TXS_IN_BLOCK={}", + state_keeper_config.transaction_slots, + MAX_TXS_IN_BLOCK ); let batch_executor_base = MainBatchExecutorBuilder::new( - config.db.state_keeper_db_path.clone(), + db_config.state_keeper_db_path.clone(), pool.clone(), - config.chain.state_keeper.max_allowed_l2_tx_gas_limit.into(), - config.chain.state_keeper.save_call_traces, - config.chain.state_keeper.validation_computational_gas_limit, + state_keeper_config.max_allowed_l2_tx_gas_limit.into(), + state_keeper_config.save_call_traces, + state_keeper_config.validation_computational_gas_limit, ); + let io = MempoolIO::new( mempool, - pool.clone(), - config.chain.state_keeper.fee_account_addr, - config.chain.state_keeper.fair_l2_gas_price, - config.chain.operations_manager.delay_interval(), + miniblock_sealer_handle, l1_gas_price_provider, - BaseSystemContractsHashes { - bootloader: config.chain.state_keeper.bootloader_hash, - default_aa: config.chain.state_keeper.default_aa_hash, - }, - ); + pool, + &state_keeper_config, + mempool_config.delay_interval(), + contracts_config.l2_erc20_bridge_addr, + ) + .await; - let sealer = SealManager::new(config.chain.state_keeper.clone()); + let sealer = SealManager::new(state_keeper_config); ZkSyncStateKeeper::new( stop_receiver, Box::new(io), diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs index 77efc145d32d..394370c0199f 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs @@ -4,12 +4,8 @@ //! which unconditionally follows the instructions from the main node). use zksync_config::configs::chain::StateKeeperConfig; -use zksync_types::{ - block::BlockGasCount, - tx::{tx_execution_info::DeduplicatedWritesMetrics, ExecutionMetrics}, -}; -use super::{criteria, SealCriterion, SealResolution}; +use super::{criteria, SealCriterion, SealData, SealResolution}; #[derive(Debug)] pub struct ConditionalSealer { @@ -19,94 +15,75 @@ pub struct ConditionalSealer { } impl ConditionalSealer { - pub(crate) fn new(config: StateKeeperConfig) -> Self { - let sealers: Vec> = Self::get_default_sealers(); + /// Finds a reason why a transaction with the specified `data` is unexecutable. 
+ pub(crate) fn find_unexecutable_reason( + config: &StateKeeperConfig, + data: &SealData, + ) -> Option<&'static str> { + for sealer in &Self::default_sealers() { + const MOCK_BLOCK_TIMESTAMP: u128 = 0; + const TX_COUNT: usize = 1; + let resolution = sealer.should_seal(config, MOCK_BLOCK_TIMESTAMP, TX_COUNT, data, data); + if matches!(resolution, SealResolution::Unexecutable(_)) { + return Some(sealer.prom_criterion_name()); + } + } + None + } + + pub(super) fn new(config: StateKeeperConfig) -> Self { + let sealers = Self::default_sealers(); Self { config, sealers } } #[cfg(test)] - pub(crate) fn with_sealers( + pub(in crate::state_keeper) fn with_sealers( config: StateKeeperConfig, sealers: Vec<Box<dyn SealCriterion>>, ) -> Self { Self { config, sealers } } - #[allow(clippy::too_many_arguments)] - pub(crate) fn should_seal_l1_batch( + pub(super) fn should_seal_l1_batch( &self, l1_batch_number: u32, block_open_timestamp_ms: u128, tx_count: usize, - block_execution_metrics: ExecutionMetrics, - tx_execution_metrics: ExecutionMetrics, - block_gas_count: BlockGasCount, - tx_gas_count: BlockGasCount, - block_included_txs_size: usize, - tx_size: usize, - block_writes_metrics: DeduplicatedWritesMetrics, - tx_writes_metrics: DeduplicatedWritesMetrics, + block_data: &SealData, + tx_data: &SealData, ) -> SealResolution { + vlog::debug!( + "Determining seal resolution for L1 batch #{l1_batch_number} with {tx_count} transactions \ + and metrics {:?}", + block_data.execution_metrics + ); + let mut final_seal_resolution = SealResolution::NoSeal; for sealer in &self.sealers { let seal_resolution = sealer.should_seal( &self.config, block_open_timestamp_ms, tx_count, - block_execution_metrics, - tx_execution_metrics, - block_gas_count, - tx_gas_count, - block_included_txs_size, - tx_size, - block_writes_metrics, - tx_writes_metrics, + block_data, + tx_data, ); - match seal_resolution { - SealResolution::IncludeAndSeal => { - vlog::debug!( - "Seal block with resolution: IncludeAndSeal {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics - ); - metrics::counter!( - "server.tx_aggregation.reason", - 1, - "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "include_and_seal", - ); - } - SealResolution::ExcludeAndSeal => { - vlog::debug!( - "Seal block with resolution: ExcludeAndSeal {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics - ); - metrics::counter!( - "server.tx_aggregation.reason", - 1, - "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "exclude_and_seal", - ); - } - SealResolution::Unexecutable(_) => { + match &seal_resolution { + SealResolution::IncludeAndSeal + | SealResolution::ExcludeAndSeal + | SealResolution::Unexecutable(_) => { vlog::debug!( - "Unexecutable {} {} block: {:?}", - l1_batch_number, - sealer.prom_criterion_name(), - block_execution_metrics + "L1 batch #{l1_batch_number} processed by `{name}` with resolution {seal_resolution:?}", + name = sealer.prom_criterion_name() ); metrics::counter!( "server.tx_aggregation.reason", 1, "criterion" => sealer.prom_criterion_name(), - "seal_resolution" => "unexecutable", + "seal_resolution" => seal_resolution.name(), ); } - _ => {} + SealResolution::NoSeal => { /* Don't do anything */ } } final_seal_resolution = final_seal_resolution.stricter(seal_resolution); @@ -114,17 +91,16 @@ impl ConditionalSealer { final_seal_resolution }
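`find_unexecutable_reason` above probes every default criterion with the same `SealData` standing in for both the transaction and the block; the first `Unexecutable` verdict names the offending rule. A toy version of that probe, assuming a much-reduced rule trait whose only metric is an encoded size:

#[derive(Debug)]
enum Resolution {
    NoSeal,
    Unexecutable(&'static str),
}

trait Rule {
    fn check(&self, tx_size: usize) -> Resolution;
    fn name(&self) -> &'static str;
}

struct MaxSize(usize);

impl Rule for MaxSize {
    fn check(&self, tx_size: usize) -> Resolution {
        if tx_size > self.0 {
            Resolution::Unexecutable("tx too large")
        } else {
            Resolution::NoSeal
        }
    }

    fn name(&self) -> &'static str {
        "max_size"
    }
}

// Mirrors the shape of `find_unexecutable_reason`: the first rule returning
// `Unexecutable` for the transaction's own data supplies the reason.
fn find_unexecutable_reason(rules: &[Box<dyn Rule>], tx_size: usize) -> Option<&'static str> {
    rules.iter().find_map(|rule| {
        matches!(rule.check(tx_size), Resolution::Unexecutable(_)).then(|| rule.name())
    })
}

fn main() {
    let rules: Vec<Box<dyn Rule>> = vec![Box::new(MaxSize(100))];
    assert_eq!(find_unexecutable_reason(&rules, 50), None);
    assert_eq!(find_unexecutable_reason(&rules, 500), Some("max_size"));
}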
- pub(crate) fn get_default_sealers() -> Vec<Box<dyn SealCriterion>> { - let sealers: Vec<Box<dyn SealCriterion>> = vec![ - Box::new(criteria::slots::SlotsCriterion), - Box::new(criteria::gas::GasCriterion), - Box::new(criteria::pubdata_bytes::PubDataBytesCriterion), - Box::new(criteria::geometry_seal_criteria::InitialWritesCriterion), - Box::new(criteria::geometry_seal_criteria::RepeatedWritesCriterion), - Box::new(criteria::geometry_seal_criteria::MaxCyclesCriterion), - Box::new(criteria::geometry_seal_criteria::ComputationalGasCriterion), - Box::new(criteria::tx_encoding_size::TxEncodingSizeCriterion), - ]; - sealers + fn default_sealers() -> Vec<Box<dyn SealCriterion>> { + vec![ + Box::new(criteria::SlotsCriterion), + Box::new(criteria::GasCriterion), + Box::new(criteria::PubDataBytesCriterion), + Box::new(criteria::InitialWritesCriterion), + Box::new(criteria::RepeatedWritesCriterion), + Box::new(criteria::MaxCyclesCriterion), + Box::new(criteria::ComputationalGasCriterion), + Box::new(criteria::TxEncodingSizeCriterion), + ] } } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs deleted file mode 100644 index a2d45583337b..000000000000 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/function.rs +++ /dev/null @@ -1,67 +0,0 @@ -pub(self) use zksync_config::configs::chain::StateKeeperConfig; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; - -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; - -/// Represents a thread-safe function pointer. -type CustomSealerFn = dyn Fn( - &StateKeeperConfig, - u128, - usize, - ExecutionMetrics, - ExecutionMetrics, - BlockGasCount, - BlockGasCount, - usize, - usize, - DeduplicatedWritesMetrics, - DeduplicatedWritesMetrics, - ) -> SealResolution - + Send - + 'static; - -/// Custom criterion made from a user-provided function. Allows to turn your closure into a seal criterion. -/// Mostly useful for tests.
-pub(crate) struct FnCriterion(Box<CustomSealerFn>); - -impl std::fmt::Debug for FnCriterion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("FnCriterion").finish() - } -} - -impl SealCriterion for FnCriterion { - fn should_seal( - &self, - config: &StateKeeperConfig, - block_open_timestamp_ms: u128, - tx_count: usize, - block_execution_metrics: ExecutionMetrics, - tx_execution_metrics: ExecutionMetrics, - block_gas_count: BlockGasCount, - tx_gas_count: BlockGasCount, - block_included_txs_size: usize, - tx_size: usize, - block_writes_metrics: DeduplicatedWritesMetrics, - tx_writes_metrics: DeduplicatedWritesMetrics, - ) -> SealResolution { - self.0( - config, - block_open_timestamp_ms, - tx_count, - block_execution_metrics, - tx_execution_metrics, - block_gas_count, - tx_gas_count, - block_included_txs_size, - tx_size, - block_writes_metrics, - tx_writes_metrics, - ) - } - - fn prom_criterion_name(&self) -> &'static str { - "function_sealer" - } -} diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs index 46834a2988fe..75df352a5bb5 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/gas.rs @@ -1,9 +1,9 @@ -use crate::gas_tracker::new_block_gas_count; -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; +use crate::{ + gas_tracker::new_block_gas_count, + state_keeper::seal_criteria::{SealCriterion, SealData, SealResolution, StateKeeperConfig}, +}; -/// This is a temporary solution +/// This is a temporary solution. /// Instead of checking for gas it simply checks that the contracts' /// bytecode is large enough.
/// Among all the data which will be published on-chain the contracts' @@ -18,24 +18,22 @@ impl SealCriterion for GasCriterion { config: &StateKeeperConfig, _block_open_timestamp_ms: u128, _tx_count: usize, - _block_execution_metrics: ExecutionMetrics, - _tx_execution_metrics: ExecutionMetrics, - block_gas_count: BlockGasCount, - tx_gas_count: BlockGasCount, - _block_included_txs_size: usize, - _tx_size: usize, - _block_writes_metrics: DeduplicatedWritesMetrics, - _tx_writes_metrics: DeduplicatedWritesMetrics, + block_data: &SealData, + tx_data: &SealData, ) -> SealResolution { - if (tx_gas_count + new_block_gas_count()).has_greater_than( - (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() as u32, - ) { + let tx_bound = + (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() as u32; + let block_bound = + (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() as u32; + + if (tx_data.gas_count + new_block_gas_count()).has_greater_than(tx_bound) { SealResolution::Unexecutable("Transaction requires too much gas".into()) - } else if block_gas_count.has_greater_than(config.max_single_tx_gas) { + } else if block_data + .gas_count + .has_greater_than(config.max_single_tx_gas) + { SealResolution::ExcludeAndSeal - } else if block_gas_count.has_greater_than( - (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() as u32, - ) { + } else if block_data.gas_count.has_greater_than(block_bound) { SealResolution::IncludeAndSeal } else { SealResolution::NoSeal @@ -49,31 +47,29 @@ impl SealCriterion for GasCriterion { #[cfg(test)] mod tests { + use zksync_types::block::BlockGasCount; - use super::{new_block_gas_count, BlockGasCount, GasCriterion, SealCriterion, SealResolution}; - use zksync_config::ZkSyncConfig; + use super::*; #[test] fn test_gas_seal_criterion() { - let config = ZkSyncConfig::from_env().chain.state_keeper; + let config = StateKeeperConfig::from_env(); let criterion = GasCriterion; // Empty block should fit into gas criterion. let empty_block_gas = new_block_gas_count(); let empty_block_resolution = criterion.should_seal( &config, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - empty_block_gas, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + 0, + &SealData { + gas_count: empty_block_gas, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); + let tx_gas = BlockGasCount { commit: config.max_single_tx_gas + 1, prove: 0, @@ -82,16 +78,16 @@ mod tests { // Transaction that needs more gas than a block limit should be unexecutable. 
let huge_transaction_resolution = criterion.should_seal( &config, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - empty_block_gas + tx_gas, - tx_gas, - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + 1, + &SealData { + gas_count: empty_block_gas + tx_gas, + ..SealData::default() + }, + &SealData { + gas_count: tx_gas, + ..SealData::default() + }, ); assert_eq!( huge_transaction_resolution, @@ -99,99 +95,69 @@ mod tests { ); // Check criterion workflow + let reject_tx_bound = + (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() as u32; let tx_gas = BlockGasCount { - commit: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.commit, - prove: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.prove, - execute: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.execute, + commit: reject_tx_bound - empty_block_gas.commit, + prove: reject_tx_bound - empty_block_gas.prove, + execute: reject_tx_bound - empty_block_gas.execute, }; let resolution_after_first_tx = criterion.should_seal( &config, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - empty_block_gas + tx_gas, - tx_gas, - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + 1, + &SealData { + gas_count: empty_block_gas + tx_gas, + ..SealData::default() + }, + &SealData { + gas_count: tx_gas, + ..SealData::default() + }, ); assert_eq!(resolution_after_first_tx, SealResolution::NoSeal); - // Check criterion workflow - let tx_gas = BlockGasCount { - commit: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.commit - - 1, - prove: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.prove - - 1, - execute: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.execute - - 1, - }; - - let block_gas = BlockGasCount { - commit: (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() - as u32 - + 1, - prove: (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() - as u32 - + 1, - execute: (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage) - .round() as u32 - + 1, - }; - let resolution_after_first_tx = criterion.should_seal( + let resolution_after_second_tx = criterion.should_seal( &config, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - block_gas, - tx_gas, - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + 2, + &SealData { + gas_count: empty_block_gas + tx_gas + tx_gas, + ..SealData::default() + }, + &SealData { + gas_count: tx_gas, + ..SealData::default() + }, ); - assert_eq!(resolution_after_first_tx, SealResolution::IncludeAndSeal); + assert_eq!(resolution_after_second_tx, SealResolution::ExcludeAndSeal); // Check criterion workflow let tx_gas = BlockGasCount { - commit: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.commit, - prove: (config.max_single_tx_gas as f64 * config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.prove, - execute: (config.max_single_tx_gas as f64 * 
config.reject_tx_at_gas_percentage).round() - as u32 - - empty_block_gas.execute, + commit: reject_tx_bound - empty_block_gas.commit - 1, + prove: reject_tx_bound - empty_block_gas.prove - 1, + execute: reject_tx_bound - empty_block_gas.execute - 1, + }; + let close_bound = + (config.max_single_tx_gas as f64 * config.close_block_at_gas_percentage).round() as u32; + let block_gas = BlockGasCount { + commit: close_bound + 1, + prove: close_bound + 1, + execute: close_bound + 1, }; let resolution_after_first_tx = criterion.should_seal( &config, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - empty_block_gas + tx_gas + tx_gas, - tx_gas, - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + 1, + &SealData { + gas_count: block_gas, + ..SealData::default() + }, + &SealData { + gas_count: tx_gas, + ..SealData::default() + }, ); - assert_eq!(resolution_after_first_tx, SealResolution::ExcludeAndSeal); + assert_eq!(resolution_after_first_tx, SealResolution::IncludeAndSeal); } } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index b211cc0acd7b..43c20183b333 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,14 +1,15 @@ -use std::fmt::Debug; +use std::fmt; + +// Workspace uses. use vm::{zk_evm::zkevm_opcode_defs::system_params::ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_types::circuit::GEOMETRY_CONFIG; use zksync_types::{ - block::BlockGasCount, - circuit::SCHEDULER_UPPER_BOUND, + circuit::{GEOMETRY_CONFIG, SCHEDULER_UPPER_BOUND}, tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, }; + // Local uses -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; +use crate::state_keeper::seal_criteria::{SealCriterion, SealData, SealResolution}; // Collected vm execution metrics should fit into geometry limits. // Otherwise witness generation will fail and proof won't be generated. 
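Every geometry criterion in this file shares one threshold ladder: a transaction is unexecutable if it alone exceeds the reject fraction of the per-block limit, the block is sealed without the transaction once the limit itself is reached, and sealed with it once the close fraction is crossed. A standalone sketch of that ladder; the limit and percentages are illustrative stand-ins for `limit_per_block` and the `*_geometry_percentage` config values, not real numbers:

#[derive(Debug, PartialEq)]
enum Resolution {
    NoSeal,
    IncludeAndSeal,
    ExcludeAndSeal,
    Unexecutable,
}

fn resolve(tx_metric: usize, block_metric: usize, limit: usize) -> Resolution {
    let reject_tx_at = 0.95; // stand-in for `reject_tx_at_geometry_percentage`
    let close_block_at = 0.90; // stand-in for `close_block_at_geometry_percentage`
    let reject_bound = (limit as f64 * reject_tx_at).round() as usize;
    let close_bound = (limit as f64 * close_block_at).round() as usize;

    if tx_metric > reject_bound {
        // The transaction alone can never fit into any batch.
        Resolution::Unexecutable
    } else if block_metric >= limit {
        // Seal the batch without this transaction; it would overflow the limit.
        Resolution::ExcludeAndSeal
    } else if block_metric > close_bound {
        // Include the transaction, then seal the batch.
        Resolution::IncludeAndSeal
    } else {
        Resolution::NoSeal
    }
}

fn main() {
    assert_eq!(resolve(10, 50, 100), Resolution::NoSeal);
    assert_eq!(resolve(10, 95, 100), Resolution::IncludeAndSeal);
    assert_eq!(resolve(10, 100, 100), Resolution::ExcludeAndSeal);
    assert_eq!(resolve(96, 96, 100), Resolution::Unexecutable);
}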
@@ -30,34 +31,29 @@ trait MetricExtractor { impl<T> SealCriterion for T where - T: MetricExtractor + Debug + Send + Sync + 'static, + T: MetricExtractor + fmt::Debug + Send + Sync + 'static, { fn should_seal( &self, config: &StateKeeperConfig, _block_open_timestamp_ms: u128, _tx_count: usize, - block_execution_metrics: ExecutionMetrics, - tx_execution_metrics: ExecutionMetrics, - _block_gas_count: BlockGasCount, - _tx_gas_count: BlockGasCount, - _block_included_txs_size: usize, - _tx_size: usize, - block_writes_metrics: DeduplicatedWritesMetrics, - tx_writes_metrics: DeduplicatedWritesMetrics, + block_data: &SealData, + tx_data: &SealData, ) -> SealResolution { - if T::extract(&tx_execution_metrics, &tx_writes_metrics) - > (T::limit_per_block() as f64 * config.reject_tx_at_geometry_percentage).round() - as usize - { + let reject_bound = + (T::limit_per_block() as f64 * config.reject_tx_at_geometry_percentage).round(); + let close_bound = + (T::limit_per_block() as f64 * config.close_block_at_geometry_percentage).round(); + + if T::extract(&tx_data.execution_metrics, &tx_data.writes_metrics) > reject_bound as usize { SealResolution::Unexecutable("ZK proof cannot be generated for a transaction".into()) - } else if T::extract(&block_execution_metrics, &block_writes_metrics) + } else if T::extract(&block_data.execution_metrics, &block_data.writes_metrics) >= T::limit_per_block() { SealResolution::ExcludeAndSeal - } else if T::extract(&block_execution_metrics, &block_writes_metrics) - > (T::limit_per_block() as f64 * config.close_block_at_geometry_percentage).round() - as usize + } else if T::extract(&block_data.execution_metrics, &block_data.writes_metrics) + > close_bound as usize { SealResolution::IncludeAndSeal } else { @@ -127,15 +123,7 @@ impl MetricExtractor for ComputationalGasCriterion { #[cfg(test)] mod tests { - use zksync_config::configs::chain::StateKeeperConfig; - use zksync_types::tx::tx_execution_info::DeduplicatedWritesMetrics; - use zksync_types::tx::ExecutionMetrics; - - use super::{ - ComputationalGasCriterion, InitialWritesCriterion, MaxCyclesCriterion, MetricExtractor, - RepeatedWritesCriterion, - }; - use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution}; + use super::*; fn get_config() -> StateKeeperConfig { StateKeeperConfig { @@ -155,14 +143,12 @@ mod tests { &config, Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - block_writes_metrics, - Default::default(), + &SealData { + execution_metrics: block_execution_metrics, + writes_metrics: block_writes_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(block_resolution, SealResolution::NoSeal); } @@ -177,14 +163,12 @@ mod tests { &config, Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - block_writes_metrics, - Default::default(), + &SealData { + execution_metrics: block_execution_metrics, + writes_metrics: block_writes_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(block_resolution, SealResolution::IncludeAndSeal); } @@ -199,14 +183,12 @@ mod tests { &config, Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - block_writes_metrics, - Default::default(), + &SealData { + execution_metrics: block_execution_metrics, +
writes_metrics: block_writes_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(block_resolution, SealResolution::ExcludeAndSeal); } @@ -221,14 +203,12 @@ mod tests { &config, Default::default(), 0, - Default::default(), - tx_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - tx_writes_metrics, + &SealData::default(), + &SealData { + execution_metrics: tx_execution_metrics, + writes_metrics: tx_writes_metrics, + ..SealData::default() + }, ); assert_eq!( @@ -243,13 +223,13 @@ mod tests { let writes_metrics = DeduplicatedWritesMetrics::default(); let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block() / 2) as $metric_type, - ..Default::default() + ..ExecutionMetrics::default() }; test_no_seal_block_resolution(block_execution_metrics, writes_metrics, &$criterion); let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block() - 1) as $metric_type, - ..Default::default() + ..ExecutionMetrics::default() }; test_include_and_seal_block_resolution( @@ -260,7 +240,7 @@ mod tests { let block_execution_metrics = ExecutionMetrics { $metric_name: ($criterion::limit_per_block()) as $metric_type, - ..Default::default() + ..ExecutionMetrics::default() }; test_exclude_and_seal_block_resolution( @@ -274,7 +254,7 @@ mod tests { * config.reject_tx_at_geometry_percentage + 1f64) .round() as $metric_type, - ..Default::default() + ..ExecutionMetrics::default() }; test_unexecutable_tx_resolution(tx_execution_metrics, writes_metrics, &$criterion); @@ -282,7 +262,7 @@ mod tests { } macro_rules! test_scenario_writes_metrics { - ($criterion: tt, $metric_name: ident, $metric_type: ty) => { + ($criterion:tt, $metric_name:ident, $metric_type:ty) => { let config = get_config(); let execution_metrics = ExecutionMetrics::default(); let block_writes_metrics = DeduplicatedWritesMetrics { diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs index 3e0b7c5cb27d..39b7165d99d9 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/mod.rs @@ -1,8 +1,16 @@ -pub(crate) mod function; -pub(crate) mod gas; -pub(crate) mod slots; +mod gas; +mod geometry_seal_criteria; +mod pubdata_bytes; +mod slots; +mod tx_encoding_size; -pub(super) mod geometry_seal_criteria; -pub(super) mod pubdata_bytes; -pub(super) mod timeout; -pub(super) mod tx_encoding_size; +pub(in crate::state_keeper) use self::{ + gas::GasCriterion, + geometry_seal_criteria::{ + ComputationalGasCriterion, InitialWritesCriterion, MaxCyclesCriterion, + RepeatedWritesCriterion, + }, + pubdata_bytes::PubDataBytesCriterion, + slots::SlotsCriterion, + tx_encoding_size::TxEncodingSizeCriterion, +}; diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs index 906631b85997..43012b91e209 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/pubdata_bytes.rs @@ -1,7 +1,8 @@ -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use zksync_types::{block::BlockGasCount, MAX_PUBDATA_PER_L1_BATCH}; +use zksync_types::MAX_PUBDATA_PER_L1_BATCH; -use 
crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{ + SealCriterion, SealData, SealResolution, StateKeeperConfig, +}; #[derive(Debug)] pub struct PubDataBytesCriterion; @@ -12,32 +13,23 @@ impl SealCriterion for PubDataBytesCriterion { config: &StateKeeperConfig, _block_open_timestamp_ms: u128, _tx_count: usize, - block_execution_metrics: ExecutionMetrics, - tx_execution_metrics: ExecutionMetrics, - _block_gas_count: BlockGasCount, - _tx_gas_count: BlockGasCount, - _block_included_txs_size: usize, - _tx_size: usize, - block_writes_metrics: DeduplicatedWritesMetrics, - tx_writes_metrics: DeduplicatedWritesMetrics, + block_data: &SealData, + tx_data: &SealData, ) -> SealResolution { let max_pubdata_per_l1_batch = MAX_PUBDATA_PER_L1_BATCH as usize; + let reject_bound = + (max_pubdata_per_l1_batch as f64 * config.reject_tx_at_eth_params_percentage).round(); + let include_and_seal_bound = + (max_pubdata_per_l1_batch as f64 * config.close_block_at_eth_params_percentage).round(); + let block_size = block_data.execution_metrics.size() + block_data.writes_metrics.size(); + let tx_size = tx_data.execution_metrics.size() + tx_data.writes_metrics.size(); - let block_size = block_execution_metrics.size() + block_writes_metrics.size(); - let tx_size = tx_execution_metrics.size() + tx_writes_metrics.size(); - if tx_size - > (max_pubdata_per_l1_batch as f64 * config.reject_tx_at_eth_params_percentage).round() - as usize - { - SealResolution::Unexecutable( - "Transaction cannot be sent to L1 due to pubdata limits".into(), - ) + if tx_size > reject_bound as usize { + let message = "Transaction cannot be sent to L1 due to pubdata limits"; + SealResolution::Unexecutable(message.into()) } else if block_size > max_pubdata_per_l1_batch { SealResolution::ExcludeAndSeal - } else if block_size - > (max_pubdata_per_l1_batch as f64 * config.close_block_at_eth_params_percentage) - .round() as usize - { + } else if block_size > include_and_seal_bound as usize { SealResolution::IncludeAndSeal } else { SealResolution::NoSeal @@ -51,104 +43,68 @@ impl SealCriterion for PubDataBytesCriterion { #[cfg(test)] mod tests { - use super::{PubDataBytesCriterion, SealCriterion, SealResolution, MAX_PUBDATA_PER_L1_BATCH}; - use zksync_config::ZkSyncConfig; use zksync_types::tx::ExecutionMetrics; + use super::*; + #[test] fn seal_criterion() { - let config = ZkSyncConfig::from_env().chain.state_keeper; + let config = StateKeeperConfig::from_env(); let criterion = PubDataBytesCriterion; let block_execution_metrics = ExecutionMetrics { - contracts_deployed: 0, - contracts_used: 0, - gas_used: 0, l2_l1_long_messages: (MAX_PUBDATA_PER_L1_BATCH as f64 * config.close_block_at_eth_params_percentage - 1.0) .round() as usize, - published_bytecode_bytes: 0, - l2_l1_logs: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, + ..ExecutionMetrics::default() }; let empty_block_resolution = criterion.should_seal( &config, - Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + &SealData { + execution_metrics: block_execution_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); let block_execution_metrics = ExecutionMetrics { - contracts_deployed: 0, - contracts_used: 0, - gas_used: 0, 
l2_l1_long_messages: (MAX_PUBDATA_PER_L1_BATCH as f64 * config.close_block_at_eth_params_percentage + 1f64) .round() as usize, - published_bytecode_bytes: 0, - l2_l1_logs: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, + ..ExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( &config, - Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + &SealData { + execution_metrics: block_execution_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); let block_execution_metrics = ExecutionMetrics { - contracts_deployed: 0, - contracts_used: 0, - gas_used: 0, l2_l1_long_messages: MAX_PUBDATA_PER_L1_BATCH as usize + 1, - published_bytecode_bytes: 0, - l2_l1_logs: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, + ..ExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( &config, - Default::default(), 0, - block_execution_metrics, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + 0, + &SealData { + execution_metrics: block_execution_metrics, + ..SealData::default() + }, + &SealData::default(), ); assert_eq!(full_block_resolution, SealResolution::ExcludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs index 4c78aeae9ba2..3febdfa8b7b7 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/slots.rs @@ -1,6 +1,6 @@ -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; +use crate::state_keeper::seal_criteria::{ + SealCriterion, SealData, SealResolution, StateKeeperConfig, +}; /// Checks whether we should seal the block because we've run out of transaction slots. 
#[derive(Debug)] @@ -12,14 +12,8 @@ impl SealCriterion for SlotsCriterion { config: &StateKeeperConfig, _block_open_timestamp_ms: u128, tx_count: usize, - _block_execution_metrics: ExecutionMetrics, - _tx_execution_metrics: ExecutionMetrics, - _block_gas_count: BlockGasCount, - _tx_gas_count: BlockGasCount, - _block_included_txs_size: usize, - _tx_size: usize, - _block_writes_metrics: DeduplicatedWritesMetrics, - _tx_writes_metrics: DeduplicatedWritesMetrics, + _block_data: &SealData, + _tx_data: &SealData, ) -> SealResolution { if tx_count >= config.transaction_slots { SealResolution::IncludeAndSeal @@ -35,27 +29,19 @@ impl SealCriterion for SlotsCriterion { #[cfg(test)] mod tests { - - use super::{SealCriterion, SealResolution, SlotsCriterion}; - use zksync_config::ZkSyncConfig; + use super::*; #[test] fn test_slots_seal_criterion() { - let config = ZkSyncConfig::from_env().chain.state_keeper; + let config = StateKeeperConfig::from_env(); let criterion = SlotsCriterion; let almost_full_block_resolution = criterion.should_seal( &config, Default::default(), config.transaction_slots - 1, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + &SealData::default(), + &SealData::default(), ); assert_eq!(almost_full_block_resolution, SealResolution::NoSeal); @@ -63,14 +49,8 @@ mod tests { &config, Default::default(), config.transaction_slots, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), + &SealData::default(), + &SealData::default(), ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs deleted file mode 100644 index 625e1b102d3b..000000000000 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/timeout.rs +++ /dev/null @@ -1,107 +0,0 @@ -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use zksync_utils::time::millis_since_epoch; - -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; - -/// Checks whether we should seal the block because we've reached the block commit timeout. 
-#[derive(Debug)] -pub struct TimeoutCriterion; - -impl SealCriterion for TimeoutCriterion { - fn should_seal( - &self, - config: &StateKeeperConfig, - block_open_timestamp_ms: u128, - tx_count: usize, - _block_execution_metrics: ExecutionMetrics, - _tx_execution_metrics: ExecutionMetrics, - _block_gas_count: BlockGasCount, - _tx_gas_count: BlockGasCount, - _block_included_txs_size: usize, - _tx_size: usize, - _block_writes_metrics: DeduplicatedWritesMetrics, - _tx_writes_metrics: DeduplicatedWritesMetrics, - ) -> SealResolution { - if tx_count == 0 { - return SealResolution::NoSeal; - } - - let current_timestamp = millis_since_epoch(); - - debug_assert!( - current_timestamp >= block_open_timestamp_ms, - "We can't go backwards in time" - ); - - if (current_timestamp - block_open_timestamp_ms) as u64 > config.block_commit_deadline_ms { - SealResolution::IncludeAndSeal - } else { - SealResolution::NoSeal - } - } - - fn prom_criterion_name(&self) -> &'static str { - "seal_criteria_timeout" - } -} - -#[cfg(test)] -mod tests { - - use super::{millis_since_epoch, SealCriterion, SealResolution, TimeoutCriterion}; - use zksync_config::ZkSyncConfig; - - #[test] - fn test_timeout_seal_criterion() { - let config = ZkSyncConfig::from_env().chain.state_keeper; - let criterion = TimeoutCriterion; - - // Empty block shouldn't be sealed by timeout - let empty_block_resolution = criterion.should_seal( - &config, - 0, - 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - assert_eq!(empty_block_resolution, SealResolution::NoSeal); - - // Check criterion workflow - let no_timeout_resolution = criterion.should_seal( - &config, - millis_since_epoch(), - 1, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - assert_eq!(no_timeout_resolution, SealResolution::NoSeal); - - let timeout_resolution = criterion.should_seal( - &config, - millis_since_epoch() - config.block_commit_deadline_ms as u128 - 1, - 1, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - assert_eq!(timeout_resolution, SealResolution::IncludeAndSeal); - } -} diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs index 86c75f05bb60..d5d804fafee7 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/criteria/tx_encoding_size.rs @@ -1,8 +1,8 @@ use vm::vm_with_bootloader::BOOTLOADER_TX_ENCODING_SPACE; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use crate::state_keeper::seal_criteria::{SealCriterion, SealResolution, StateKeeperConfig}; +use crate::state_keeper::seal_criteria::{ + SealCriterion, SealData, SealResolution, StateKeeperConfig, +}; #[derive(Debug)] pub struct TxEncodingSizeCriterion; @@ -13,28 +13,21 @@ impl SealCriterion for TxEncodingSizeCriterion { config: &StateKeeperConfig, _block_open_timestamp_ms: u128, _tx_count: usize, - _block_execution_metrics: ExecutionMetrics, - _tx_execution_metrics: ExecutionMetrics, - _block_gas_count: BlockGasCount, - 
_tx_gas_count: BlockGasCount, - block_included_txs_size: usize, - tx_size: usize, - _block_writes_metrics: DeduplicatedWritesMetrics, - _tx_writes_metrics: DeduplicatedWritesMetrics, + block_data: &SealData, + tx_data: &SealData, ) -> SealResolution { - if tx_size - > (BOOTLOADER_TX_ENCODING_SPACE as f64 * config.reject_tx_at_geometry_percentage) - .round() as usize - { - SealResolution::Unexecutable( - "Transaction cannot be included due to large encoding size".into(), - ) - } else if block_included_txs_size > BOOTLOADER_TX_ENCODING_SPACE as usize { + let reject_bound = + (BOOTLOADER_TX_ENCODING_SPACE as f64 * config.reject_tx_at_geometry_percentage).round(); + let include_and_seal_bound = (BOOTLOADER_TX_ENCODING_SPACE as f64 + * config.close_block_at_geometry_percentage) + .round(); + + if tx_data.cumulative_size > reject_bound as usize { + let message = "Transaction cannot be included due to large encoding size"; + SealResolution::Unexecutable(message.into()) + } else if block_data.cumulative_size > BOOTLOADER_TX_ENCODING_SPACE as usize { SealResolution::ExcludeAndSeal - } else if block_included_txs_size - > (BOOTLOADER_TX_ENCODING_SPACE as f64 * config.close_block_at_geometry_percentage) - .round() as usize - { + } else if block_data.cumulative_size > include_and_seal_bound as usize { SealResolution::IncludeAndSeal } else { SealResolution::NoSeal @@ -48,43 +41,26 @@ impl SealCriterion for TxEncodingSizeCriterion { #[cfg(test)] mod tests { - use super::{ - SealCriterion, SealResolution, TxEncodingSizeCriterion, BOOTLOADER_TX_ENCODING_SPACE, - }; - use zksync_config::ZkSyncConfig; + use super::*; #[test] fn seal_criterion() { - let config = ZkSyncConfig::from_env().chain.state_keeper; + let config = StateKeeperConfig::from_env(); let criterion = TxEncodingSizeCriterion; - let empty_block_resolution = criterion.should_seal( - &config, - Default::default(), - 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); + let empty_block_resolution = + criterion.should_seal(&config, 0, 0, &SealData::default(), &SealData::default()); assert_eq!(empty_block_resolution, SealResolution::NoSeal); let unexecutable_resolution = criterion.should_seal( &config, - Default::default(), 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - BOOTLOADER_TX_ENCODING_SPACE as usize + 1, - Default::default(), - Default::default(), + 0, + &SealData::default(), + &SealData { + cumulative_size: BOOTLOADER_TX_ENCODING_SPACE as usize + 1, + ..SealData::default() + }, ); assert_eq!( unexecutable_resolution, @@ -95,31 +71,31 @@ mod tests { let exclude_and_seal_resolution = criterion.should_seal( &config, - Default::default(), 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - BOOTLOADER_TX_ENCODING_SPACE as usize + 1, - 1, - Default::default(), - Default::default(), + 0, + &SealData { + cumulative_size: BOOTLOADER_TX_ENCODING_SPACE as usize + 1, + ..SealData::default() + }, + &SealData { + cumulative_size: 1, + ..SealData::default() + }, ); assert_eq!(exclude_and_seal_resolution, SealResolution::ExcludeAndSeal); let include_and_seal_resolution = criterion.should_seal( &config, - Default::default(), 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - BOOTLOADER_TX_ENCODING_SPACE as usize, - 1, - Default::default(), - Default::default(), + 0, + &SealData { 
+ cumulative_size: BOOTLOADER_TX_ENCODING_SPACE as usize, + ..SealData::default() + }, + &SealData { + cumulative_size: 1, + ..SealData::default() + }, ); assert_eq!(include_and_seal_resolution, SealResolution::IncludeAndSeal); } diff --git a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs index d30332f322b7..0402c6e45162 100644 --- a/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/bin/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -10,18 +10,24 @@ //! Maintaining all the criteria in one place has proven itself to be very error-prone, //! thus now every criterion is independent of the others. -use std::fmt::Debug; -pub(self) use zksync_config::configs::chain::StateKeeperConfig; +use std::fmt; + +use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::block::BlockGasCount; -use zksync_types::tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}; -use zksync_utils::time::{millis_since, millis_since_epoch}; +use zksync_types::{ + block::BlockGasCount, + fee::TransactionExecutionMetrics, + tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, + Transaction, +}; +use zksync_utils::time::millis_since; -use self::conditional_sealer::ConditionalSealer; -use super::updates::UpdatesManager; +mod conditional_sealer; +pub(super) mod criteria; -pub(crate) mod conditional_sealer; -pub(crate) mod criteria; +pub(crate) use self::conditional_sealer::ConditionalSealer; +use super::{extractors, updates::UpdatesManager}; +use crate::gas_tracker::{gas_count_from_tx_and_metrics, gas_count_from_writes}; /// Reported decision regarding block sealing. #[derive(Debug, Clone, PartialEq)] @@ -50,45 +56,73 @@ impl SealResolution { /// `Unexecutable` is stricter than `ExcludeAndSeal`. /// `ExcludeAndSeal` is stricter than `IncludeAndSeal`. /// `IncludeAndSeal` is stricter than `NoSeal`. - pub fn stricter(self, other: SealResolution) -> SealResolution { + pub fn stricter(self, other: Self) -> Self { match (self, other) { - (SealResolution::Unexecutable(reason), _) - | (_, SealResolution::Unexecutable(reason)) => SealResolution::Unexecutable(reason), - (SealResolution::ExcludeAndSeal, _) | (_, SealResolution::ExcludeAndSeal) => { - SealResolution::ExcludeAndSeal - } - (SealResolution::IncludeAndSeal, _) | (_, SealResolution::IncludeAndSeal) => { - SealResolution::IncludeAndSeal + (Self::Unexecutable(reason), _) | (_, Self::Unexecutable(reason)) => { + Self::Unexecutable(reason) } - _ => SealResolution::NoSeal, + (Self::ExcludeAndSeal, _) | (_, Self::ExcludeAndSeal) => Self::ExcludeAndSeal, + (Self::IncludeAndSeal, _) | (_, Self::IncludeAndSeal) => Self::IncludeAndSeal, + _ => Self::NoSeal, } } /// Returns `true` if L1 batch should be sealed according to this resolution. - pub fn should_seal(self) -> bool { - matches!( - self, - SealResolution::IncludeAndSeal | SealResolution::ExcludeAndSeal - ) + pub fn should_seal(&self) -> bool { + matches!(self, Self::IncludeAndSeal | Self::ExcludeAndSeal) + } + + /// Name of this resolution usable as a metric label. + pub fn name(&self) -> &'static str { + match self { + Self::NoSeal => "no_seal", + Self::IncludeAndSeal => "include_and_seal", + Self::ExcludeAndSeal => "exclude_and_seal", + Self::Unexecutable(_) => "unexecutable", + } + } +} + +/// Information about transaction or block applicable either to a single transaction, or +/// to the entire miniblock / L1 batch. 
+#[derive(Debug, Default)]
+pub struct SealData {
+    pub(super) execution_metrics: ExecutionMetrics,
+    pub(super) gas_count: BlockGasCount,
+    pub(super) cumulative_size: usize,
+    pub(super) writes_metrics: DeduplicatedWritesMetrics,
+}
+
+impl SealData {
+    /// Creates sealing data based on the execution of a `transaction`. Assumes that all writes
+    /// performed by the transaction are initial.
+    pub(crate) fn for_transaction(
+        transaction: Transaction,
+        tx_metrics: &TransactionExecutionMetrics,
+    ) -> Self {
+        let execution_metrics = ExecutionMetrics::from_tx_metrics(tx_metrics);
+        let writes_metrics = DeduplicatedWritesMetrics::from_tx_metrics(tx_metrics);
+        let gas_count = gas_count_from_tx_and_metrics(&transaction, &execution_metrics)
+            + gas_count_from_writes(&writes_metrics);
+        Self {
+            execution_metrics,
+            gas_count,
+            cumulative_size: extractors::encoded_transaction_size(transaction),
+            writes_metrics,
+        }
+    }
+}
 
-pub trait SealCriterion: Debug + Send + 'static {
-    #[allow(clippy::too_many_arguments)]
+pub(super) trait SealCriterion: fmt::Debug + Send + 'static {
     fn should_seal(
         &self,
         config: &StateKeeperConfig,
         block_open_timestamp_ms: u128,
         tx_count: usize,
-        block_execution_metrics: ExecutionMetrics,
-        tx_execution_metrics: ExecutionMetrics,
-        block_gas_count: BlockGasCount,
-        tx_gas_count: BlockGasCount,
-        block_included_txs_size: usize,
-        tx_size: usize,
-        block_writes_metrics: DeduplicatedWritesMetrics,
-        tx_writes_metrics: DeduplicatedWritesMetrics,
+        block_data: &SealData,
+        tx_data: &SealData,
     ) -> SealResolution;
+
     // We need `self` here only due to Rust restrictions on creating trait objects:
     // https://doc.rust-lang.org/reference/items/traits.html#object-safety
     fn prom_criterion_name(&self) -> &'static str;
@@ -112,15 +146,17 @@ pub struct SealManager {
     miniblock_sealers: Vec<Box<SealerFn>>,
 }
 
-impl Debug for SealManager {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("SealManager").finish()
+impl fmt::Debug for SealManager {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter
+            .debug_struct("SealManager")
+            .finish_non_exhaustive()
     }
 }
 
 impl SealManager {
     /// Creates a default pre-configured seal manager for the main node.
-    pub(crate) fn new(config: StateKeeperConfig) -> Self {
+    pub(super) fn new(config: StateKeeperConfig) -> Self {
         let timeout_batch_sealer = Self::timeout_batch_sealer(config.block_commit_deadline_ms);
         let code_hash_batch_sealer = Self::code_hash_batch_sealer(BaseSystemContractsHashes {
             bootloader: config.bootloader_hash,
@@ -140,64 +176,58 @@ impl SealManager {
     /// Allows to create a seal manager object from externally-defined sealers.
     pub fn custom(
         conditional_sealer: Option<ConditionalSealer>,
-        unconditional_sealer: Vec<Box<SealerFn>>,
-        miniblock_sealer: Vec<Box<SealerFn>>,
+        unconditional_sealers: Vec<Box<SealerFn>>,
+        miniblock_sealers: Vec<Box<SealerFn>>,
     ) -> Self {
         Self {
             conditional_sealer,
-            unconditional_sealers: unconditional_sealer,
-            miniblock_sealers: miniblock_sealer,
+            unconditional_sealers,
+            miniblock_sealers,
         }
     }
 
     /// Creates a sealer function that would seal the batch because of the timeout.
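+    ///
+    /// The returned boxed closure is a plain `Fn(&UpdatesManager) -> bool`; a usage sketch
+    /// with a hypothetical 10-second deadline:
+    ///
+    /// ```ignore
+    /// let sealer = SealManager::timeout_batch_sealer(10_000);
+    /// // `true` once more than 10_000 ms have passed since the batch was opened.
+    /// let should_seal = sealer(&updates_manager);
+    /// ```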
-    pub(crate) fn timeout_batch_sealer(block_commit_deadline_ms: u64) -> Box<SealerFn> {
+    fn timeout_batch_sealer(block_commit_deadline_ms: u64) -> Box<SealerFn> {
+        const RULE_NAME: &str = "no_txs_timeout";
+
         Box::new(move |manager| {
             // Verify timestamp
             let should_seal_timeout =
                 millis_since(manager.batch_timestamp()) > block_commit_deadline_ms;
 
             if should_seal_timeout {
-                metrics::increment_counter!(
-                    "server.tx_aggregation.reason",
-                    "criterion" => "no_txs_timeout"
-                );
-                vlog::info!(
-                    "l1_batch_timeout_triggered without new txs: {:?} {:?} {:?}",
-                    manager.batch_timestamp(),
-                    block_commit_deadline_ms,
-                    millis_since_epoch()
+                metrics::increment_counter!("server.tx_aggregation.reason", "criterion" => RULE_NAME);
+                vlog::debug!(
+                    "Decided to seal L1 batch using rule `{RULE_NAME}`; batch timestamp: {}, \
+                     commit deadline: {block_commit_deadline_ms}ms",
+                    extractors::display_timestamp(manager.batch_timestamp())
                 );
             }
-
             should_seal_timeout
         })
     }
 
     /// Creates a sealer function that would seal the batch if the provided base system contract hashes are different
     /// from ones in the updates manager.
-    pub(crate) fn code_hash_batch_sealer(
+    pub(super) fn code_hash_batch_sealer(
         base_system_contracts_hashes: BaseSystemContractsHashes,
     ) -> Box<SealerFn> {
+        const RULE_NAME: &str = "different_code_hashes";
+
         Box::new(move |manager| {
             // Verify code hashes
             let should_seal_code_hashes =
                 base_system_contracts_hashes != manager.base_system_contract_hashes();
 
             if should_seal_code_hashes {
-                metrics::increment_counter!(
-                    "server.tx_aggregation.reason",
-                    "criterion" => "different_code_hashes"
-                );
-                vlog::info!(
-                    "l1_batch_different_code_hashes_triggered without new txs \n
-                    l1 batch code hashes: {:?} \n
-                    expected code hashes {:?} ",
+                metrics::increment_counter!("server.tx_aggregation.reason", "criterion" => RULE_NAME);
+                vlog::debug!(
+                    "Decided to seal L1 batch using rule `{RULE_NAME}`; L1 batch code hashes: {:?}, \
+                     expected code hashes: {:?}",
                     base_system_contracts_hashes,
-                    manager.base_system_contract_hashes(),
+                    manager.base_system_contract_hashes()
                 );
             }
-
             should_seal_code_hashes
         })
     }
@@ -211,41 +241,28 @@ impl SealManager {
         })
     }
 
-    #[allow(clippy::too_many_arguments)]
-    pub(crate) fn should_seal_l1_batch(
+    pub(super) fn should_seal_l1_batch(
        &self,
        l1_batch_number: u32,
        block_open_timestamp_ms: u128,
        tx_count: usize,
-        block_execution_metrics: ExecutionMetrics,
-        tx_execution_metrics: ExecutionMetrics,
-        block_gas_count: BlockGasCount,
-        tx_gas_count: BlockGasCount,
-        block_included_txs_size: usize,
-        tx_size: usize,
-        block_writes_metrics: DeduplicatedWritesMetrics,
-        tx_writes_metrics: DeduplicatedWritesMetrics,
+        block_data: &SealData,
+        tx_data: &SealData,
     ) -> SealResolution {
-        if let Some(sealer) = self.conditional_sealer.as_ref() {
+        if let Some(sealer) = &self.conditional_sealer {
             sealer.should_seal_l1_batch(
                 l1_batch_number,
                 block_open_timestamp_ms,
                 tx_count,
-                block_execution_metrics,
-                tx_execution_metrics,
-                block_gas_count,
-                tx_gas_count,
-                block_included_txs_size,
-                tx_size,
-                block_writes_metrics,
-                tx_writes_metrics,
+                block_data,
+                tx_data,
             )
         } else {
             SealResolution::NoSeal
         }
     }
 
-    pub(crate) fn should_seal_l1_batch_unconditionally(
+    pub(super) fn should_seal_l1_batch_unconditionally(
         &self,
         updates_manager: &UpdatesManager,
     ) -> bool {
@@ -257,7 +274,7 @@ impl SealManager {
             .any(|sealer| (sealer)(updates_manager))
     }
 
-    pub(crate) fn should_seal_miniblock(&self, updates_manager: &UpdatesManager) -> bool {
+    pub(super) fn should_seal_miniblock(&self, updates_manager: &UpdatesManager) -> bool {
        // Unlike
with the L1 batch, we don't check the number of transactions in the miniblock, // because we might want to seal the miniblock even if it's empty (e.g. on an external node, // where we have to replicate the state of the main node, including the last (empty) miniblock of the batch). @@ -271,66 +288,21 @@ impl SealManager { #[cfg(test)] mod tests { - use vm::{ - vm::{VmPartialExecutionResult, VmTxExecutionResult}, - vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - }; - use zksync_types::{ - l2::L2Tx, - tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs}, - Address, Nonce, H256, U256, - }; use zksync_utils::time::seconds_since_epoch; use super::*; - - fn create_manager() -> UpdatesManager { - let block_context = BlockContextMode::NewBlock( - DerivedBlockContext { - context: BlockContext { - block_number: 0, - block_timestamp: 0, - l1_gas_price: 0, - fair_l2_gas_price: 0, - operator_address: Default::default(), - }, - base_fee: 0, - }, - 0.into(), - ); - UpdatesManager::new(&block_context, Default::default()) - } + use crate::state_keeper::tests::{ + create_execution_result, create_transaction, create_updates_manager, + }; fn apply_tx_to_manager(manager: &mut UpdatesManager) { - let mut tx = L2Tx::new( - Default::default(), - Default::default(), - Nonce(0), - Default::default(), - Address::default(), - U256::zero(), - None, - Default::default(), - ); - tx.set_input(H256::random().0.to_vec(), H256::random()); + let tx = create_transaction(10, 100); manager.extend_from_executed_transaction( - &tx.into(), - VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: VmPartialExecutionResult { - logs: VmExecutionLogs::default(), - revert_reason: None, - contracts_used: 0, - cycles_used: 0, - computational_gas_used: 0, - }, - call_traces: vec![], - gas_refunded: 0, - operator_suggested_refund: 0, - }, - Default::default(), - Default::default(), - Default::default(), + tx, + create_execution_result(0, []), + vec![], + BlockGasCount::default(), + ExecutionMetrics::default(), ); } @@ -339,7 +311,7 @@ mod tests { fn timeout_miniblock_sealer() { let timeout_miniblock_sealer = SealManager::timeout_miniblock_sealer(10_000); - let mut manager = create_manager(); + let mut manager = create_updates_manager(); // Empty miniblock should not trigger. 
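        // (The timestamp below is set 10 s in the past, but the miniblock holds no
        // transactions, so the sealer must not fire.)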
        manager.miniblock.timestamp = seconds_since_epoch() - 10;
        assert!(
diff --git a/core/bin/zksync_core/src/state_keeper/tests/mod.rs b/core/bin/zksync_core/src/state_keeper/tests/mod.rs
index 55315511126b..4bb747911901 100644
--- a/core/bin/zksync_core/src/state_keeper/tests/mod.rs
+++ b/core/bin/zksync_core/src/state_keeper/tests/mod.rs
@@ -1,3 +1,5 @@
+use once_cell::sync::Lazy;
+
 use std::{
     sync::{
         atomic::{AtomicBool, AtomicU64, Ordering},
@@ -6,47 +8,224 @@ use std::{
     time::Instant,
 };
 
-use crate::gas_tracker::constants::{
-    BLOCK_COMMIT_BASE_COST, BLOCK_EXECUTE_BASE_COST, BLOCK_PROVE_BASE_COST,
+use vm::{
+    vm::{VmPartialExecutionResult, VmTxExecutionResult},
+    vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext},
+    VmBlockResult, VmExecutionResult,
 };
-use once_cell::sync::Lazy;
-use zksync_config::configs::chain::StateKeeperConfig;
-use zksync_config::constants::ZKPORTER_IS_AVAILABLE;
+use zksync_config::{configs::chain::StateKeeperConfig, constants::ZKPORTER_IS_AVAILABLE};
 use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes};
 use zksync_types::{
-    block::BlockGasCount, zk_evm::block_properties::BlockProperties, MiniblockNumber,
+    block::BlockGasCount,
+    commitment::{BlockMetaParameters, BlockMetadata},
+    fee::Fee,
+    l2::L2Tx,
+    transaction_request::PaymasterParams,
+    tx::tx_execution_info::{TxExecutionStatus, VmExecutionLogs},
+    vm_trace::{VmExecutionTrace, VmTrace},
+    zk_evm::aux_structures::{LogQuery, Timestamp},
+    zk_evm::block_properties::BlockProperties,
+    Address, L2ChainId, MiniblockNumber, Nonce, StorageLogQuery, StorageLogQueryType, Transaction,
+    H256, U256,
 };
 use zksync_utils::h256_to_u256;
 
+use self::tester::{
+    bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec,
+    successful_exec_with_metrics, TestScenario,
+};
+use crate::gas_tracker::constants::{
+    BLOCK_COMMIT_BASE_COST, BLOCK_EXECUTE_BASE_COST, BLOCK_PROVE_BASE_COST,
+};
 use crate::state_keeper::{
+    keeper::POLL_WAIT_DURATION,
     seal_criteria::{
-        criteria::{gas::GasCriterion, slots::SlotsCriterion},
-        SealManager,
+        criteria::{GasCriterion, SlotsCriterion},
+        ConditionalSealer, SealManager,
     },
     types::ExecutionMetricsForCriteria,
+    updates::UpdatesManager,
 };
 
-use self::tester::{
-    bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec,
-    successful_exec_with_metrics, TestScenario,
-};
-
-use super::{keeper::POLL_WAIT_DURATION, seal_criteria::conditional_sealer::ConditionalSealer};
-
 mod tester;
 
-pub static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
+pub(super) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
     Lazy::new(BaseSystemContracts::load_from_disk);
 
-pub fn default_block_properties() -> BlockProperties {
+pub(super) fn default_block_properties() -> BlockProperties {
     BlockProperties {
         default_aa_code_hash: h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash),
         zkporter_is_available: ZKPORTER_IS_AVAILABLE,
     }
 }
 
-#[test]
-fn sealed_by_number_of_txs() {
+pub(super) fn create_block_metadata(number: u32) -> BlockMetadata {
+    BlockMetadata {
+        root_hash: H256::from_low_u64_be(number.into()),
+        rollup_last_leaf_index: u64::from(number) + 20,
+        merkle_root_hash: H256::from_low_u64_be(number.into()),
+        initial_writes_compressed: vec![],
+        repeated_writes_compressed: vec![],
+        commitment: H256::from_low_u64_be(number.into()),
+        l2_l1_messages_compressed: vec![],
+        l2_l1_merkle_root: H256::from_low_u64_be(number.into()),
+        block_meta_params: BlockMetaParameters {
+            zkporter_is_available: ZKPORTER_IS_AVAILABLE,
+            bootloader_code_hash: BASE_SYSTEM_CONTRACTS.bootloader.hash,
+            default_aa_code_hash: BASE_SYSTEM_CONTRACTS.default_aa.hash,
+        },
+        aux_data_hash: H256::zero(),
+        meta_parameters_hash: H256::zero(),
+        pass_through_data_hash: H256::zero(),
+    }
+}
+
+pub(super) fn default_vm_block_result() -> VmBlockResult {
+    VmBlockResult {
+        full_result: VmExecutionResult {
+            events: vec![],
+            storage_log_queries: vec![],
+            used_contract_hashes: vec![],
+            l2_to_l1_logs: vec![],
+            return_data: vec![],
+            gas_used: 0,
+            contracts_used: 0,
+            revert_reason: None,
+            trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()),
+            total_log_queries: 0,
+            cycles_used: 0,
+            computational_gas_used: 0,
+        },
+        block_tip_result: VmPartialExecutionResult {
+            logs: VmExecutionLogs::default(),
+            revert_reason: None,
+            contracts_used: 0,
+            cycles_used: 0,
+            computational_gas_used: 0,
+        },
+    }
+}
+
+pub(super) fn default_block_context() -> DerivedBlockContext {
+    DerivedBlockContext {
+        context: BlockContext {
+            block_number: 0,
+            block_timestamp: 0,
+            l1_gas_price: 0,
+            fair_l2_gas_price: 0,
+            operator_address: Address::default(),
+        },
+        base_fee: 0,
+    }
+}
+
+pub(super) fn create_updates_manager() -> UpdatesManager {
+    let block_context = BlockContextMode::NewBlock(default_block_context(), 0.into());
+    UpdatesManager::new(&block_context, BaseSystemContractsHashes::default())
+}
+
+pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L2Tx {
+    let fee = Fee {
+        gas_limit: 1000_u64.into(),
+        max_fee_per_gas: fee_per_gas.into(),
+        max_priority_fee_per_gas: 0_u64.into(),
+        gas_per_pubdata_limit: gas_per_pubdata.into(),
+    };
+    let mut tx = L2Tx::new_signed(
+        Address::random(),
+        vec![],
+        Nonce(0),
+        fee,
+        U256::zero(),
+        L2ChainId(271),
+        &H256::repeat_byte(0x11),
+        None,
+        PaymasterParams::default(),
+    )
+    .unwrap();
+    // Input means all transaction data (NOT calldata, but all tx fields) that came from the API.
+    // This input will be used for the derivation of the tx hash, so put some random data in it
+    // to make sure that the transaction hash is unique.
+    tx.set_input(H256::random().0.to_vec(), H256::random());
+    tx
+}
+
+pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> Transaction {
+    create_l2_transaction(fee_per_gas, gas_per_pubdata).into()
+}
+
+pub(super) fn create_execution_result(
+    tx_number_in_block: u16,
+    storage_logs: impl IntoIterator<Item = (U256, Query)>,
+) -> VmTxExecutionResult {
+    let storage_logs: Vec<_> = storage_logs
+        .into_iter()
+        .map(|(key, query)| query.into_log(key, tx_number_in_block))
+        .collect();
+
+    let logs = VmExecutionLogs {
+        total_log_queries_count: storage_logs.len() + 2,
+        storage_logs,
+        events: vec![],
+        l2_to_l1_logs: vec![],
+    };
+    VmTxExecutionResult {
+        status: TxExecutionStatus::Success,
+        result: VmPartialExecutionResult {
+            logs,
+            revert_reason: None,
+            contracts_used: 0,
+            cycles_used: 0,
+            computational_gas_used: 0,
+        },
+        call_traces: vec![],
+        gas_refunded: 0,
+        operator_suggested_refund: 0,
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(super) enum Query {
+    Read(U256),
+    InitialWrite(U256),
+    RepeatedWrite(U256, U256),
+}
+
+impl Query {
+    fn into_log(self, key: U256, tx_number_in_block: u16) -> StorageLogQuery {
+        let log_type = match self {
+            Self::Read(_) => StorageLogQueryType::Read,
+            Self::InitialWrite(_) => StorageLogQueryType::InitialWrite,
+            Self::RepeatedWrite(_, _) => StorageLogQueryType::RepeatedWrite,
+        };
+
+        StorageLogQuery {
+            log_query: LogQuery {
+                timestamp: Timestamp(0),
+                tx_number_in_block,
+                aux_byte: 0,
+                shard_id: 0,
+                address: Address::default(),
+                key,
+                read_value: match self {
+                    Self::Read(prev) | Self::RepeatedWrite(prev, _) => prev,
+                    Self::InitialWrite(_) => U256::zero(),
+                },
+                written_value: match self {
+                    Self::Read(_) => U256::zero(),
+                    Self::InitialWrite(value) | Self::RepeatedWrite(_, value) => value,
+                },
+                rw_flag: !matches!(self, Self::Read(_)),
+                rollback: false,
+                is_service: false,
+            },
+            log_type,
+        }
+    }
+}
+
+#[tokio::test]
+async fn sealed_by_number_of_txs() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -71,11 +250,12 @@ fn sealed_by_number_of_txs() {
         .next_tx("Second tx", random_tx(2), successful_exec())
         .miniblock_sealed("Miniblock 2")
         .batch_sealed("Batch 1")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
-#[test]
-fn sealed_by_gas() {
+#[tokio::test]
+async fn sealed_by_gas() {
     let config = StateKeeperConfig {
         max_single_tx_gas: 62_002,
         reject_tx_at_gas_percentage: 1.0,
@@ -126,11 +306,11 @@ fn sealed_by_gas() {
             "L1 gas used by a batch should consist of gas used by its txs + basic block gas cost"
         );
     })
-    .run(sealer);
+    .run(sealer).await;
 }
 
-#[test]
-fn sealed_by_gas_then_by_num_tx() {
+#[tokio::test]
+async fn sealed_by_gas_then_by_num_tx() {
     let config = StateKeeperConfig {
         max_single_tx_gas: 62_000,
         reject_tx_at_gas_percentage: 1.0,
@@ -171,11 +351,12 @@ fn sealed_by_gas_then_by_num_tx() {
         .next_tx("Fourth tx", random_tx(4), successful_exec())
         .miniblock_sealed("Miniblock 4")
         .batch_sealed("Batch 2")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
-#[test]
-fn batch_sealed_before_miniblock_does() {
+#[tokio::test]
+async fn batch_sealed_before_miniblock_does() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -206,11 +387,12 @@ fn batch_sealed_before_miniblock_does() {
         );
     })
     .batch_sealed("Batch 1")
-    .run(sealer);
+    .run(sealer)
+    .await;
 }
 
-#[test]
-fn basic_flow() {
+#[tokio::test]
+async fn basic_flow() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -233,11 +415,12 @@ fn basic_flow() {
         .next_tx("Second tx", random_tx(2), successful_exec())
         .miniblock_sealed("Miniblock 2")
         .batch_sealed("Batch 1")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
-#[test]
-fn rejected_tx() {
+#[tokio::test]
+async fn rejected_tx() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -263,11 +446,12 @@ fn rejected_tx() {
         .next_tx("Second successful tx", random_tx(3), successful_exec())
         .miniblock_sealed("Second miniblock")
         .batch_sealed("Batch with 2 successful txs")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
-#[test]
-fn bootloader_tip_out_of_gas_flow() {
+#[tokio::test]
+async fn bootloader_tip_out_of_gas_flow() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -309,11 +493,12 @@ fn bootloader_tip_out_of_gas_flow() {
         .next_tx("Second tx of the 2nd batch", third_tx, successful_exec())
         .miniblock_sealed("Miniblock with 2nd tx")
         .batch_sealed("2nd batch sealed")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
-#[test]
-fn bootloader_config_has_been_updated() {
+#[tokio::test]
+async fn bootloader_config_has_been_updated() {
     let sealer = SealManager::custom(
         None,
         vec![SealManager::code_hash_batch_sealer(
@@ -346,11 +531,12 @@ fn bootloader_config_has_been_updated() {
             "There should be 1 transaction in the batch"
         );
     })
-    .run(sealer);
+    .run(sealer)
+    .await;
 }
 
-#[test]
-fn pending_batch_is_applied() {
+#[tokio::test]
+async fn pending_batch_is_applied() {
     let config = StateKeeperConfig {
         transaction_slots: 3,
         ..Default::default()
@@ -390,12 +576,13 @@ fn pending_batch_is_applied() {
             "There should be 3 transactions in the batch"
         );
     })
-    .run(sealer);
+    .run(sealer)
+    .await;
 }
 
 /// Unconditionally seal the batch without triggering specific criteria.
-#[test]
-fn unconditional_sealing() {
+#[tokio::test]
+async fn unconditional_sealing() {
     // Trigger to know when to seal the batch.
     // Once a miniblock with one tx is sealed, the trigger allows the batch to be sealed as well.
     let batch_seal_trigger = Arc::new(AtomicBool::new(false));
@@ -434,12 +621,13 @@ fn unconditional_sealing() {
         .miniblock_sealed("Miniblock is sealed with just one tx")
         .no_txs_until_next_action("Still no tx")
         .batch_sealed("Batch is sealed with just one tx")
-        .run(sealer);
+        .run(sealer)
+        .await;
 }
 
/// Checks that the next miniblock sealed after a pending batch has the correct timestamp.
-#[test]
-fn miniblock_timestamp_after_pending_batch() {
+#[tokio::test]
+async fn miniblock_timestamp_after_pending_batch() {
     let config = StateKeeperConfig {
         transaction_slots: 2,
         ..Default::default()
@@ -472,15 +660,16 @@ fn miniblock_timestamp_after_pending_batch() {
         );
     })
     .batch_sealed("Batch is sealed with two transactions")
-    .run(sealer);
+    .run(sealer)
+    .await;
 }
 
 /// Makes sure that the timestamp doesn't decrease in consecutive miniblocks.
 ///
 /// Timestamps are faked in the IO layer, so this test mostly makes sure that the state keeper doesn't substitute
 /// any unexpected value on its own.
-#[test]
-fn time_is_monotonic() {
+#[tokio::test]
+async fn time_is_monotonic() {
     let timestamp_first_miniblock = Arc::new(AtomicU64::new(0u64)); // Time is faked in tests.
let timestamp_second_miniblock = timestamp_first_miniblock.clone(); let timestamp_third_miniblock = timestamp_first_miniblock.clone(); @@ -541,5 +730,6 @@ fn time_is_monotonic() { ); timestamp_third_miniblock.store(updates.miniblock.timestamp, Ordering::Relaxed); }) - .run(sealer); + .run(sealer) + .await; } diff --git a/core/bin/zksync_core/src/state_keeper/tests/tester.rs b/core/bin/zksync_core/src/state_keeper/tests/tester.rs index 0278fe2f2598..b3d60438b2f4 100644 --- a/core/bin/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/bin/zksync_core/src/state_keeper/tests/tester.rs @@ -1,29 +1,30 @@ +use async_trait::async_trait; +use tokio::sync::{mpsc, watch}; + use std::{ - cell::RefCell, collections::{HashMap, HashSet, VecDeque}, - sync::mpsc, + sync::{Arc, RwLock}, time::{Duration, Instant}, }; -use assert_matches::assert_matches; -use tokio::sync::watch; - use vm::{ vm::{VmPartialExecutionResult, VmTxExecutionResult}, vm_with_bootloader::{BlockContext, BlockContextMode, DerivedBlockContext}, - VmBlockResult, VmExecutionResult, + VmBlockResult, }; -use zksync_types::vm_trace::{VmExecutionTrace, VmTrace}; use zksync_types::{ - l2::L2Tx, tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, - Nonce, Transaction, H256, U256, + tx::tx_execution_info::TxExecutionStatus, Address, L1BatchNumber, MiniblockNumber, Transaction, + H256, U256, }; use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, io::{L1BatchParams, PendingBatchData, StateKeeperIO}, seal_criteria::SealManager, - tests::{default_block_properties, BASE_SYSTEM_CONTRACTS}, + tests::{ + create_l2_transaction, default_block_properties, default_vm_block_result, + BASE_SYSTEM_CONTRACTS, + }, types::ExecutionMetricsForCriteria, updates::UpdatesManager, ZkSyncStateKeeper, @@ -151,7 +152,7 @@ impl TestScenario { /// Launches the test. /// Provided `SealManager` is expected to be externally configured to adhere the written scenario logic. - pub(crate) fn run(self, sealer: SealManager) { + pub(crate) async fn run(self, sealer: SealManager) { assert!(!self.actions.is_empty(), "Test scenario can't be empty"); let batch_executor_base = TestBatchExecutorBuilder::new(&self); @@ -166,7 +167,7 @@ impl TestScenario { sealer, ); - let sk_thread = std::thread::spawn(move || sk.run()); + let sk_thread = tokio::spawn(sk.run()); // We must assume that *theoretically* state keeper may ignore the stop signal from IO once scenario is // completed, so we spawn it in a separate thread to not get test stuck. @@ -176,11 +177,11 @@ impl TestScenario { while start.elapsed() <= hard_timeout { if sk_thread.is_finished() { sk_thread - .join() + .await .unwrap_or_else(|_| panic!("State keeper thread panicked")); return; } - std::thread::sleep(poll_interval); + tokio::time::sleep(poll_interval).await; } panic!("State keeper test did not exit until the hard timeout, probably it got stuck"); } @@ -189,16 +190,7 @@ impl TestScenario { /// Creates a random transaction. Provided tx number would be used as a transaction hash, /// so it's easier to understand which transaction caused test to fail. 
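+/// A usage sketch (as the scenarios above call it):
+/// ```ignore
+/// // The resulting hash is `H256::from_low_u64_be(1)`, so a failure can be
+/// // traced back to "First tx".
+/// let tx = random_tx(1);
+/// ```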
 pub(crate) fn random_tx(tx_number: u64) -> Transaction {
-    let mut tx = L2Tx::new(
-        Default::default(),
-        Default::default(),
-        Nonce(0),
-        Default::default(),
-        Default::default(),
-        Default::default(),
-        Default::default(),
-        Default::default(),
-    );
+    let mut tx = create_l2_transaction(10, 100);
     // Set the `tx_number` as tx hash so if transaction causes problems,
     // it'll be easier to understand which one.
     tx.set_input(H256::random().0.to_vec(), H256::from_low_u64_be(tx_number));
@@ -342,13 +334,15 @@ impl std::fmt::Debug for ScenarioItem {
     }
 }
 
+type ExpectedTransactions = VecDeque<HashMap<H256, VecDeque<TxExecutionResult>>>;
+
 #[derive(Debug)]
 pub(crate) struct TestBatchExecutorBuilder {
     /// Sequence of known transaction execution results per batch.
     /// We need to store txs for each batch separately, since the same transaction
     /// can be executed in several batches (e.g. after an `ExcludeAndSeal` rollback).
     /// When initializing each batch, we will `pop_front` known txs for the corresponding executor.
-    txs: RefCell<VecDeque<HashMap<H256, VecDeque<TxExecutionResult>>>>,
+    txs: Arc<RwLock<ExpectedTransactions>>,
     /// Set of transactions that would be rolled back at least once.
     rollback_set: HashSet<H256>,
 }
@@ -402,23 +396,23 @@ impl TestBatchExecutorBuilder {
         txs.push_back(HashMap::default());
 
         Self {
-            txs: RefCell::new(txs),
+            txs: Arc::new(RwLock::new(txs)),
             rollback_set,
         }
     }
 }
 
+#[async_trait]
 impl L1BatchExecutorBuilder for TestBatchExecutorBuilder {
-    fn init_batch(&self, _l1batch_params: L1BatchParams) -> BatchExecutorHandle {
-        let (commands_sender, commands_receiver) = mpsc::channel();
+    async fn init_batch(&self, _l1batch_params: L1BatchParams) -> BatchExecutorHandle {
+        let (commands_sender, commands_receiver) = mpsc::channel(1);
         let executor = TestBatchExecutor::new(
             commands_receiver,
-            self.txs.borrow_mut().pop_front().unwrap(),
+            self.txs.write().unwrap().pop_front().unwrap(),
             self.rollback_set.clone(),
         );
-
-        let handle = std::thread::spawn(move || executor.run());
+        let handle = tokio::task::spawn_blocking(move || executor.run());
         BatchExecutorHandle::from_raw(handle, commands_sender)
     }
 
@@ -451,7 +445,7 @@ impl TestBatchExecutor {
     }
 
     pub(super) fn run(mut self) {
-        while let Ok(cmd) = self.commands.recv() {
+        while let Some(cmd) = self.commands.blocking_recv() {
             match cmd {
                 Command::ExecuteTx(tx, resp) => {
                     let result = self
@@ -485,31 +479,7 @@ impl TestBatchExecutor {
                 }
                 Command::FinishBatch(resp) => {
                     // Blanket result, it doesn't really matter.
-                    let result = VmBlockResult {
-                        full_result: VmExecutionResult {
-                            events: Default::default(),
-                            storage_log_queries: Default::default(),
-                            used_contract_hashes: Default::default(),
-                            l2_to_l1_logs: Default::default(),
-                            return_data: Default::default(),
-                            gas_used: Default::default(),
-                            contracts_used: Default::default(),
-                            revert_reason: Default::default(),
-                            trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()),
-                            total_log_queries: Default::default(),
-                            cycles_used: Default::default(),
-                            computational_gas_used: Default::default(),
-                        },
-                        block_tip_result: VmPartialExecutionResult {
-                            logs: Default::default(),
-                            revert_reason: Default::default(),
-                            contracts_used: Default::default(),
-                            cycles_used: Default::default(),
-                            computational_gas_used: Default::default(),
-                        },
-                    };
-
-                    resp.send(result).unwrap();
+                    resp.send(default_vm_block_result()).unwrap();
                     return;
                 }
             }
@@ -570,6 +540,7 @@ impl TestIO {
     }
 }
 
+#[async_trait]
 impl StateKeeperIO for TestIO {
     fn current_l1_batch_number(&self) -> L1BatchNumber {
         self.batch_number
@@ -579,11 +550,11 @@ impl StateKeeperIO for TestIO {
         self.miniblock_number
     }
 
-    fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
+    async fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
         self.scenario.pending_batch.take()
     }
 
-    fn wait_for_new_batch_params(&mut self, _max_wait: Duration) -> Option<L1BatchParams> {
+    async fn wait_for_new_batch_params(&mut self, _max_wait: Duration) -> Option<L1BatchParams> {
         let block_properties = default_block_properties();
 
         let previous_block_hash = U256::zero();
@@ -606,55 +577,46 @@ impl StateKeeperIO for TestIO {
         })
     }
 
-    fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option<u64> {
+    async fn wait_for_new_miniblock_params(&mut self, _max_wait: Duration) -> Option<u64> {
         Some(self.timestamp)
     }
 
-    fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
+    async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
         let action = self.pop_next_item("wait_for_next_tx");
 
         // Check whether we should ignore tx requests.
         if self.skipping_txs {
             // As per expectation, we should provide a delay given by the state keeper.
-            std::thread::sleep(max_wait);
+            tokio::time::sleep(max_wait).await;
             // Return the action to the scenario as we don't use it.
             self.scenario.actions.push_front(action);
             return None;
         }
 
         // We shouldn't, process normally.
-        assert_matches!(
-            action,
-            ScenarioItem::Tx(_, _, _),
-            "Expected action from scenario (first), instead got another action (second)"
-        );
-        let ScenarioItem::Tx(_, tx, _) = action else { unreachable!() };
+        let ScenarioItem::Tx(_, tx, _) = action else {
+            panic!("Unexpected action: {:?}", action);
+        };
         Some(tx)
     }
 
-    fn rollback(&mut self, tx: &Transaction) {
+    async fn rollback(&mut self, tx: Transaction) {
         let action = self.pop_next_item("rollback");
-        assert_matches!(
-            action,
-            ScenarioItem::Rollback(_, _),
-            "Expected action from scenario (first), instead got another action (second)"
-        );
-        let ScenarioItem::Rollback(_, expected_tx) = action else { unreachable!() };
+        let ScenarioItem::Rollback(_, expected_tx) = action else {
+            panic!("Unexpected action: {:?}", action);
+        };
         assert_eq!(
-            tx, &expected_tx,
+            tx, expected_tx,
             "Incorrect transaction has been rolled back"
         );
         self.skipping_txs = false;
     }
 
-    fn reject(&mut self, tx: &Transaction, error: &str) {
+    async fn reject(&mut self, tx: &Transaction, error: &str) {
         let action = self.pop_next_item("reject");
-        assert_matches!(
-            action,
-            ScenarioItem::Reject(_, _, _),
-            "Expected action from scenario (first), instead got another action (second)"
-        );
-        let ScenarioItem::Reject(_, expected_tx, expected_err) = action else { unreachable!() };
+        let ScenarioItem::Reject(_, expected_tx, expected_err) = action else {
+            panic!("Unexpected action: {:?}", action);
+        };
         assert_eq!(tx, &expected_tx, "Incorrect transaction has been rejected");
         if let Some(expected_err) = expected_err {
             assert!(
@@ -667,14 +629,11 @@ impl StateKeeperIO for TestIO {
         self.skipping_txs = false;
     }
 
-    fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) {
+    async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) {
         let action = self.pop_next_item("seal_miniblock");
-        assert_matches!(
-            action,
-            ScenarioItem::MiniblockSeal(_, _),
-            "Expected action from scenario (first), instead got another action (second)"
-        );
-        let ScenarioItem::MiniblockSeal(_, check_fn) = action else { unreachable!() };
+        let ScenarioItem::MiniblockSeal(_, check_fn) = action else {
+            panic!("Unexpected action: {:?}", action);
+        };
         if let Some(check_fn) = check_fn {
             check_fn(updates_manager);
         }
@@ -683,19 +642,16 @@ impl StateKeeperIO for TestIO {
         self.skipping_txs = false;
     }
 
-    fn seal_l1_batch(
+    async fn seal_l1_batch(
         &mut self,
         block_result: VmBlockResult,
         updates_manager: UpdatesManager,
         block_context: DerivedBlockContext,
     ) {
         let action = self.pop_next_item("seal_l1_batch");
-        assert_matches!(
-            action,
-            ScenarioItem::BatchSeal(_, _),
-            "Expected action from scenario (first), instead got another action (second)"
-        );
-        let ScenarioItem::BatchSeal(_, check_fn) = action else { unreachable!() };
+        let ScenarioItem::BatchSeal(_, check_fn) = action else {
+            panic!("Unexpected action: {:?}", action);
+        };
         if let Some(check_fn) = check_fn {
             check_fn(&block_result, &updates_manager, &block_context.context);
         }
diff --git a/core/bin/zksync_core/src/state_keeper/types.rs b/core/bin/zksync_core/src/state_keeper/types.rs
index f7b1b2424b66..e6fdd1e03071 100644
--- a/core/bin/zksync_core/src/state_keeper/types.rs
+++ b/core/bin/zksync_core/src/state_keeper/types.rs
@@ -1,12 +1,22 @@
-use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use std::{
+    collections::HashMap,
+    sync::{Arc, Mutex},
+};
+
 use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore};
-use zksync_types::{block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, Transaction};
+use zksync_types::{
+    block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction,
+};
 
 #[derive(Debug, Clone)]
-pub struct MempoolGuard(pub Arc<Mutex<MempoolStore>>);
+pub struct MempoolGuard(Arc<Mutex<MempoolStore>>);
 
 impl MempoolGuard {
+    pub fn new(next_priority_id: PriorityOpId, capacity: u64) -> Self {
+        let store = MempoolStore::new(next_priority_id, capacity);
+        Self(Arc::new(Mutex::new(store)))
+    }
+
     pub fn insert(&mut self, transactions: Vec<Transaction>, nonces: HashMap<Address, Nonce>) {
         self.0
             .lock()
diff --git a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
index bc01b97bffd4..36c113c73b5d 100644
--- a/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
+++ b/core/bin/zksync_core/src/state_keeper/updates/l1_batch_updates.rs
@@ -27,7 +27,7 @@ impl L1BatchUpdates {
     }
 
     pub(crate) fn extend_from_sealed_miniblock(&mut self, miniblock_updates: MiniblockUpdates) {
-        for tx in miniblock_updates.executed_transactions.iter() {
+        for tx in &miniblock_updates.executed_transactions {
             if let ExecuteTransactionCommon::L1(data) = &tx.transaction.common_data {
                 let onchain_metadata = data.onchain_metadata().onchain_data;
                 self.priority_ops_onchain_data.push(onchain_metadata);
@@ -45,47 +45,26 @@ impl L1BatchUpdates {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::gas_tracker::new_block_gas_count;
-    use vm::transaction_data::TransactionData;
-    use vm::vm::{VmPartialExecutionResult, VmTxExecutionResult};
-    use zksync_types::tx::tx_execution_info::TxExecutionStatus;
-    use zksync_types::{l2::L2Tx, Address, Nonce, Transaction, H256, U256};
+    use crate::{
+        gas_tracker::new_block_gas_count,
+        state_keeper::{
+            extractors,
+            tests::{create_execution_result, create_transaction},
+        },
+    };
 
     #[test]
     fn apply_miniblock_with_empty_tx() {
         let mut miniblock_accumulator = MiniblockUpdates::new(0);
-        let mut tx = L2Tx::new(
-            Default::default(),
-            Default::default(),
-            Nonce(0),
-            Default::default(),
-            Address::default(),
-            U256::zero(),
-            None,
-            Default::default(),
-        );
-
-        tx.set_input(H256::random().0.to_vec(), H256::random());
-        let tx: Transaction = tx.into();
+        let tx = create_transaction(10, 100);
+        let expected_tx_size = extractors::encoded_transaction_size(tx.clone());
 
         miniblock_accumulator.extend_from_executed_transaction(
-            &tx,
-            VmTxExecutionResult {
-                status: TxExecutionStatus::Success,
-                result: VmPartialExecutionResult {
-                    logs: Default::default(),
-                    revert_reason: None,
-                    contracts_used: 0,
-                    cycles_used: 0,
-                    computational_gas_used: 0,
-                },
-                call_traces: vec![],
-                gas_refunded: 0,
-                operator_suggested_refund: 0,
-            },
-            Default::default(),
-            Default::default(),
-            Default::default(),
+            tx,
+            create_execution_result(0, []),
+            BlockGasCount::default(),
+            ExecutionMetrics::default(),
+            vec![],
         );
 
         let mut l1_batch_accumulator = L1BatchUpdates::new();
@@ -95,11 +74,6 @@ mod tests {
         assert_eq!(l1_batch_accumulator.l1_gas_count, new_block_gas_count());
         assert_eq!(l1_batch_accumulator.priority_ops_onchain_data.len(), 0);
         assert_eq!(l1_batch_accumulator.block_execution_metrics.l2_l1_logs, 0);
-
-        let tx_data: TransactionData = tx.into();
-        assert_eq!(
-            l1_batch_accumulator.txs_encoding_size,
-            tx_data.into_tokens().len()
-        );
+        assert_eq!(l1_batch_accumulator.txs_encoding_size, expected_tx_size);
     }
 }
diff --git a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
index 9b4382deab14..d66d9982b561 100644
--- a/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
+++ b/core/bin/zksync_core/src/state_keeper/updates/miniblock_updates.rs
@@ -1,14 +1,17 @@
 use std::collections::HashMap;
-use vm::transaction_data::TransactionData;
+
 use vm::vm::VmTxExecutionResult;
-use zksync_types::block::BlockGasCount;
-use zksync_types::event::extract_bytecodes_marked_as_known;
-use zksync_types::l2_to_l1_log::L2ToL1Log;
-use zksync_types::tx::tx_execution_info::VmExecutionLogs;
-use zksync_types::tx::ExecutionMetrics;
-use zksync_types::{tx::TransactionExecutionResult, StorageLogQuery, Transaction, VmEvent, H256};
+use zksync_types::{
+    block::BlockGasCount,
+    event::extract_bytecodes_marked_as_known,
+    l2_to_l1_log::L2ToL1Log,
+    tx::{tx_execution_info::VmExecutionLogs, ExecutionMetrics, TransactionExecutionResult},
+    StorageLogQuery, Transaction, VmEvent, H256,
+};
 use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo};
 
+use crate::state_keeper::extractors;
+
 #[derive(Debug, Clone, PartialEq)]
 pub struct MiniblockUpdates {
     pub executed_transactions: Vec<TransactionExecutionResult>,
@@ -16,24 +19,23 @@ pub struct MiniblockUpdates {
     pub storage_logs: Vec<StorageLogQuery>,
     pub l2_to_l1_logs: Vec<L2ToL1Log>,
     pub new_factory_deps: HashMap<H256, Vec<u8>>,
-    // how much L1 gas will it take to submit this block?
+    /// How much L1 gas will it take to submit this block?
     pub l1_gas_count: BlockGasCount,
     pub block_execution_metrics: ExecutionMetrics,
     pub txs_encoding_size: usize,
-
     pub timestamp: u64,
 }
 
 impl MiniblockUpdates {
     pub(crate) fn new(timestamp: u64) -> Self {
         Self {
-            executed_transactions: Default::default(),
-            events: Default::default(),
-            storage_logs: Default::default(),
-            l2_to_l1_logs: Default::default(),
-            new_factory_deps: Default::default(),
-            l1_gas_count: Default::default(),
-            block_execution_metrics: Default::default(),
+            executed_transactions: vec![],
+            events: vec![],
+            storage_logs: vec![],
+            l2_to_l1_logs: vec![],
+            new_factory_deps: HashMap::new(),
+            l1_gas_count: BlockGasCount::default(),
+            block_execution_metrics: ExecutionMetrics::default(),
             txs_encoding_size: 0,
             timestamp,
         }
@@ -47,7 +49,7 @@ impl MiniblockUpdates {
 
     pub(crate) fn extend_from_executed_transaction(
         &mut self,
-        tx: &Transaction,
+        tx: Transaction,
         tx_execution_result: VmTxExecutionResult,
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: ExecutionMetrics,
@@ -58,34 +60,38 @@ impl MiniblockUpdates {
             extract_bytecodes_marked_as_known(&tx_execution_result.result.logs.events);
 
         // Get transaction factory deps
-        let tx_factory_deps: HashMap<_, _> = tx
-            .execute
-            .factory_deps
-            .clone()
-            .unwrap_or_default()
+        let factory_deps = tx.execute.factory_deps.as_deref().unwrap_or_default();
+        let tx_factory_deps: HashMap<_, _> = factory_deps
             .iter()
-            .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone()))
+            .map(|bytecode| (hash_bytecode(bytecode), bytecode))
             .collect();
 
         // Save all bytecodes that were marked as known on the bootloader
-        saved_factory_deps.into_iter().for_each(|bytecodehash| {
-            let bytecode = tx_factory_deps
-                .get(&bytecodehash)
-                .unwrap_or_else(|| {
-                    panic!(
-                        "Failed to get factory deps on tx: bytecode hash: {:?}, tx hash: {}",
-                        bytecodehash,
-                        tx.hash()
-                    )
-                })
-                .clone();
-
-            self.new_factory_deps.insert(bytecodehash, bytecode);
+        let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| {
+            let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| {
+                panic!(
+                    "Failed to get factory deps on tx: bytecode hash: {:?}, tx hash: {}",
+                    bytecode_hash,
+                    tx.hash()
+                )
+            });
+            (bytecode_hash, bytecode.to_vec())
         });
+        self.new_factory_deps.extend(known_bytecodes);
+
-        let block_context = BlockContextMode::NewBlock(
-            DerivedBlockContext {
-                context: BlockContext {
-                    block_number: 0,
-                    block_timestamp: 0,
-                    l1_gas_price: 0,
-                    fair_l2_gas_price: 0,
-                    operator_address: Default::default(),
-                },
-                base_fee: 0,
-            },
-            0.into(),
-        );
-        let mut updates_manager = UpdatesManager::new(&block_context, Default::default());
+        let mut updates_manager = create_updates_manager();
         assert_eq!(updates_manager.pending_executed_transactions_len(), 0);
 
         // Apply tx.
-        let mut tx = L2Tx::new(
-            Default::default(),
-            Default::default(),
-            Nonce(0),
-            Default::default(),
-            Address::default(),
-            U256::zero(),
-            None,
-            Default::default(),
-        );
-        tx.set_input(H256::random().0.to_vec(), H256::random());
+        let tx = create_transaction(10, 100);
         updates_manager.extend_from_executed_transaction(
-            &tx.into(),
-            VmTxExecutionResult {
-                status: TxExecutionStatus::Success,
-                result: VmPartialExecutionResult {
-                    logs: VmExecutionLogs::default(),
-                    revert_reason: None,
-                    contracts_used: 0,
-                    cycles_used: 0,
-                    computational_gas_used: 0,
-                },
-                call_traces: vec![],
-                gas_refunded: 0,
-                operator_suggested_refund: 0,
-            },
+            tx,
+            create_execution_result(0, []),
             vec![],
             new_block_gas_count(),
-            Default::default(),
+            ExecutionMetrics::default(),
         );
 
         // Check that only pending state is updated.
@@ -198,7 +186,7 @@ mod tests {
         assert_eq!(updates_manager.l1_batch.executed_transactions.len(), 0);
 
         // Seal miniblock.
-        updates_manager.seal_miniblock(2);
+        updates_manager.push_miniblock(2);
 
         // Check that L1 batch updates are the same with the pending state
         // and miniblock updates are empty.
diff --git a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
index e794d1746415..f77b1b133282 100644
--- a/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
+++ b/core/bin/zksync_core/src/sync_layer/batch_status_updater.rs
@@ -1,83 +1,334 @@
 use std::time::{Duration, Instant};
 
+use chrono::{DateTime, Utc};
 use tokio::sync::watch::Receiver;
 
 use zksync_dal::ConnectionPool;
-use zksync_types::aggregated_operations::AggregatedActionType;
+use zksync_types::{
+    aggregated_operations::AggregatedActionType, explorer_api::BlockDetails, L1BatchNumber,
+    MiniblockNumber, H256,
+};
+use zksync_web3_decl::{
+    jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
+    namespaces::ZksNamespaceClient,
+    RpcResult,
+};
 
-use super::ActionQueue;
-
-/// The task that keeps checking for the new batch status changes and persists them in the database.
-pub fn run_batch_status_updater(
+/// Represents a change in the batch status.
+/// It may be a batch being committed, proven or executed.
+#[derive(Debug)]
+pub(crate) struct BatchStatusChange {
+    pub(crate) number: L1BatchNumber,
+    pub(crate) l1_tx_hash: H256,
+    pub(crate) happened_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Default)]
+struct StatusChanges {
+    commit: Vec<BatchStatusChange>,
+    prove: Vec<BatchStatusChange>,
+    execute: Vec<BatchStatusChange>,
+}
+
+impl StatusChanges {
+    fn new() -> Self {
+        Self::default()
+    }
+
+    /// Returns true if there are no status changes.
+    fn is_empty(&self) -> bool {
+        self.commit.is_empty() && self.prove.is_empty() && self.execute.is_empty()
+    }
+}
+
+/// Module responsible for fetching the batch status changes, i.e. one that monitors whether the
+/// locally applied batch was committed, proven or executed on L1.
+///
+/// In essence, it keeps track of the last batch number per status, and periodically polls the main
+/// node on these batches in order to see whether the status has changed. If some changes were picked up,
+/// the module updates the database to mirror the state observable from the main node.
+#[derive(Debug)]
+pub struct BatchStatusUpdater {
+    client: HttpClient,
     pool: ConnectionPool,
-    actions: ActionQueue,
-    stop_receiver: Receiver<bool>,
-) {
-    loop {
-        if *stop_receiver.borrow() {
-            vlog::info!("Stop signal receiver, exiting the batch status updater routine");
-            return;
+
+    last_executed_l1_batch: L1BatchNumber,
+    last_proven_l1_batch: L1BatchNumber,
+    last_committed_l1_batch: L1BatchNumber,
+}
+
+impl BatchStatusUpdater {
+    pub async fn new(main_node_url: &str, pool: ConnectionPool) -> Self {
+        let client = HttpClientBuilder::default()
+            .build(main_node_url)
+            .expect("Unable to create a main node client");
+
+        let mut storage = pool.access_storage_tagged("sync_layer").await;
+        let last_executed_l1_batch = storage
+            .blocks_dal()
+            .get_number_of_last_block_executed_on_eth()
+            .await
+            .unwrap_or_default();
+        let last_proven_l1_batch = storage
+            .blocks_dal()
+            .get_number_of_last_block_proven_on_eth()
+            .await
+            .unwrap_or_default();
+        let last_committed_l1_batch = storage
+            .blocks_dal()
+            .get_number_of_last_block_committed_on_eth()
+            .await
+            .unwrap_or_default();
+        drop(storage);
+
+        Self {
+            client,
+            pool,
+
+            last_committed_l1_batch,
+            last_proven_l1_batch,
+            last_executed_l1_batch,
         }
+    }
+
+    pub async fn run(mut self, stop_receiver: Receiver<bool>) {
+        loop {
+            if *stop_receiver.borrow() {
+                vlog::info!("Stop signal received, exiting the batch status updater routine");
+                return;
+            }
+            // Status changes are created externally, so that even if we receive a network error
+            // while requesting the changes, we will be able to process what we already fetched.
+            let mut status_changes = StatusChanges::new();
+            if let Err(err) = self.get_status_changes(&mut status_changes).await {
+                vlog::warn!("Failed to get status changes from the main node: {err}");
+            };
+            if status_changes.is_empty() {
+                const DELAY_INTERVAL: Duration = Duration::from_secs(5);
+                tokio::time::sleep(DELAY_INTERVAL).await;
+                continue;
+            }
+
+            self.apply_status_changes(status_changes).await;
+        }
+    }
+
+    /// Goes through the already fetched batches trying to update their statuses.
+    /// Returns a collection of the status updates grouped by the operation type.
+    ///
+    /// Fetched changes are capped by the last locally applied batch number, so
+    /// it's safe to assume that every status change can safely be applied (no status
+    /// changes "from the future").
+    async fn get_status_changes(&self, status_changes: &mut StatusChanges) -> RpcResult<()> {
         let start = Instant::now();
-        let mut storage = pool.access_storage_blocking();
-        // Anything past this batch is not saved to the database.
-        let last_sealed_batch = storage.blocks_dal().get_newest_block_header();
-
-        let changes = actions.take_status_changes(last_sealed_batch.number);
-        if changes.is_empty() {
-            const DELAY_INTERVAL: Duration = Duration::from_secs(5);
-            std::thread::sleep(DELAY_INTERVAL);
-            continue;
+        let last_sealed_batch = self
+            .pool
+            .access_storage_tagged("sync_layer")
+            .await
+            .blocks_dal()
+            .get_newest_block_header()
+            .await
+            .number;
+
+        // We don't want to change the internal state until we actually persist the changes.
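+        // Snapshot of the cursors; the `executed <= proven <= committed <= sealed`
+        // invariant is asserted below before any batch is polled.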
+        let mut last_committed_l1_batch = self.last_committed_l1_batch;
+        let mut last_proven_l1_batch = self.last_proven_l1_batch;
+        let mut last_executed_l1_batch = self.last_executed_l1_batch;
+
+        assert!(
+            last_executed_l1_batch <= last_proven_l1_batch,
+            "Incorrect local state: executed batch must be proven"
+        );
+        assert!(
+            last_proven_l1_batch <= last_committed_l1_batch,
+            "Incorrect local state: proven batch must be committed"
+        );
+        assert!(
+            last_committed_l1_batch <= last_sealed_batch,
+            "Incorrect local state: unknown batch marked as committed"
+        );
+
+        let mut batch = last_executed_l1_batch.next();
+        // In this loop we try to progress on the batch statuses, utilizing the same request to the node to potentially
+        // update all three statuses (e.g. if the node is still syncing), but also skipping the gaps in the statuses
+        // (e.g. if the last executed batch is 10, but the last proven is 20, we don't need to check the batches 11-19).
+        while batch <= last_sealed_batch {
+            // While we may receive `None` for the requested batch, it's OK: an open batch is
+            // guaranteed to not be sent to L1.
+            let request_start = Instant::now();
+            let Some((start_miniblock, _)) = self.client.get_miniblock_range(batch).await? else {
+                return Ok(());
+            };
+            metrics::histogram!(
+                "external_node.fetcher.requests",
+                request_start.elapsed(),
+                "stage" => "get_miniblock_range",
+                "actor" => "batch_status_fetcher"
+            );
+
+            // We could've used any miniblock from the range, all of them share the same info.
+            let request_start = Instant::now();
+            let Some(batch_info) = self
+                .client
+                .get_block_details(MiniblockNumber(start_miniblock.as_u32()))
+                .await?
+            else {
+                // We cannot recover from an external API inconsistency.
+                panic!(
+                    "Node API is inconsistent: miniblock {} was reported to be a part of L1 batch {}, \
+                     but the API has no information about this miniblock", start_miniblock, batch
+                );
+            };
+            metrics::histogram!(
+                "external_node.fetcher.requests",
+                request_start.elapsed(),
+                "stage" => "get_block_details",
+                "actor" => "batch_status_fetcher"
+            );
+
+            Self::update_committed_batch(status_changes, &batch_info, &mut last_committed_l1_batch);
+            Self::update_proven_batch(status_changes, &batch_info, &mut last_proven_l1_batch);
+            Self::update_executed_batch(status_changes, &batch_info, &mut last_executed_l1_batch);
+
+            // Check whether we can skip a part of the range.
+            if batch_info.commit_tx_hash.is_none() {
+                // No committed batches after this one.
+                break;
+            } else if batch_info.prove_tx_hash.is_none() && batch < last_committed_l1_batch {
+                // The interval between this batch and the last committed one is not proven.
+                batch = last_committed_l1_batch.next();
+            } else if batch_info.executed_at.is_none() && batch < last_proven_l1_batch {
+                // The interval between this batch and the last proven one is not executed.
+ batch = last_proven_l1_batch.next(); + } else { + batch += 1; + } } - for change in changes.commit.into_iter() { + metrics::histogram!("external_node.update_batch_statuses", start.elapsed()); + Ok(()) + } + + fn update_committed_batch( + status_changes: &mut StatusChanges, + batch_info: &BlockDetails, + last_committed_l1_batch: &mut L1BatchNumber, + ) { + if batch_info.commit_tx_hash.is_some() + && batch_info.l1_batch_number == last_committed_l1_batch.next() + { + assert!( + batch_info.committed_at.is_some(), + "Malformed API response: batch is committed, but has no commit timestamp" + ); + status_changes.commit.push(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.commit_tx_hash.unwrap(), + happened_at: batch_info.committed_at.unwrap(), + }); + vlog::info!("Batch {}: committed", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "committed"); + *last_committed_l1_batch += 1; + } + } + + fn update_proven_batch( + status_changes: &mut StatusChanges, + batch_info: &BlockDetails, + last_proven_l1_batch: &mut L1BatchNumber, + ) { + if batch_info.prove_tx_hash.is_some() + && batch_info.l1_batch_number == last_proven_l1_batch.next() + { + assert!( + batch_info.proven_at.is_some(), + "Malformed API response: batch is proven, but has no prove timestamp" + ); + status_changes.prove.push(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.prove_tx_hash.unwrap(), + happened_at: batch_info.proven_at.unwrap(), + }); + vlog::info!("Batch {}: proven", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "proven"); + *last_proven_l1_batch += 1; + } + } + + fn update_executed_batch( + status_changes: &mut StatusChanges, + batch_info: &BlockDetails, + last_executed_l1_batch: &mut L1BatchNumber, + ) { + if batch_info.execute_tx_hash.is_some() + && batch_info.l1_batch_number == last_executed_l1_batch.next() + { assert!( - change.number <= last_sealed_batch.number, - "Commit status change for the batch that is not sealed yet. Last sealed batch: {}, change: {:?}", - last_sealed_batch.number, - change + batch_info.executed_at.is_some(), + "Malformed API response: batch is executed, but has no execute timestamp" ); + status_changes.execute.push(BatchStatusChange { + number: batch_info.l1_batch_number, + l1_tx_hash: batch_info.execute_tx_hash.unwrap(), + happened_at: batch_info.executed_at.unwrap(), + }); + vlog::info!("Batch {}: executed", batch_info.l1_batch_number); + metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "executed"); + *last_executed_l1_batch += 1; + } + } + + /// Inserts the provided status changes into the database. + /// This method is not transactional, so it can save only a part of the changes, which is fine: + /// after the restart the updater will continue from the last saved state. + /// + /// The status changes are applied to the database by inserting bogus confirmed transactions (with + /// some fields missing/substituted) only to satisfy API needs; this component doesn't expect the updated + /// tables to be ever accessed by the `eth_sender` module. 
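+    ///
+    /// A minimal calling sketch (hypothetical, mirroring the `run` loop above):
+    ///
+    /// ```ignore
+    /// let mut changes = StatusChanges::new();
+    /// updater.get_status_changes(&mut changes).await?;
+    /// if !changes.is_empty() {
+    ///     updater.apply_status_changes(changes).await;
+    /// }
+    /// ```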
+
+    /// Inserts the provided status changes into the database.
+    /// This method is not transactional, so it can save only a part of the changes, which is fine:
+    /// after a restart, the updater will continue from the last saved state.
+    ///
+    /// The status changes are applied to the database by inserting bogus confirmed transactions (with
+    /// some fields missing/substituted) only to satisfy API needs; this component doesn't expect the updated
+    /// tables to ever be accessed by the `eth_sender` module.
+    async fn apply_status_changes(&mut self, changes: StatusChanges) {
+        let start = Instant::now();
+
+        let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+
+        for change in changes.commit.into_iter() {
             vlog::info!(
                 "Commit status change: number {}, hash {}, happened at {}",
                 change.number,
                 change.l1_tx_hash,
                 change.happened_at
             );
-            storage.eth_sender_dal().insert_bogus_confirmed_eth_tx(
-                change.number,
-                AggregatedActionType::CommitBlocks,
-                change.l1_tx_hash,
-                change.happened_at,
-            );
+            storage
+                .eth_sender_dal()
+                .insert_bogus_confirmed_eth_tx(
+                    change.number,
+                    AggregatedActionType::CommitBlocks,
+                    change.l1_tx_hash,
+                    change.happened_at,
+                )
+                .await;
+            self.last_committed_l1_batch = change.number;
         }
 
         for change in changes.prove.into_iter() {
-            assert!(
-                change.number <= last_sealed_batch.number,
-                "Prove status change for the batch that is not sealed yet. Last sealed batch: {}, change: {:?}",
-                last_sealed_batch.number,
-                change
-            );
             vlog::info!(
                 "Prove status change: number {}, hash {}, happened at {}",
                 change.number,
                 change.l1_tx_hash,
                 change.happened_at
             );
-            storage.eth_sender_dal().insert_bogus_confirmed_eth_tx(
-                change.number,
-                AggregatedActionType::PublishProofBlocksOnchain,
-                change.l1_tx_hash,
-                change.happened_at,
-            );
+            storage
+                .eth_sender_dal()
+                .insert_bogus_confirmed_eth_tx(
+                    change.number,
+                    AggregatedActionType::PublishProofBlocksOnchain,
+                    change.l1_tx_hash,
+                    change.happened_at,
+                )
+                .await;
+            self.last_proven_l1_batch = change.number;
         }
 
         for change in changes.execute.into_iter() {
-            assert!(
-                change.number <= last_sealed_batch.number,
-                "Execute status change for the batch that is not sealed yet. Last sealed batch: {}, change: {:?}",
-                last_sealed_batch.number,
-                change
-            );
             vlog::info!(
                 "Execute status change: number {}, hash {},
@@ -85,12 +336,16 @@ pub fn run_batch_status_updater(
                 change.happened_at
             );
-            storage.eth_sender_dal().insert_bogus_confirmed_eth_tx(
-                change.number,
-                AggregatedActionType::ExecuteBlocks,
-                change.l1_tx_hash,
-                change.happened_at,
-            );
+            storage
+                .eth_sender_dal()
+                .insert_bogus_confirmed_eth_tx(
+                    change.number,
+                    AggregatedActionType::ExecuteBlocks,
+                    change.l1_tx_hash,
+                    change.happened_at,
+                )
+                .await;
+            self.last_executed_l1_batch = change.number;
         }
 
         metrics::histogram!(
diff --git a/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs b/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs
index 462f7e2b40a1..6093548ff4c3 100644
--- a/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs
+++ b/core/bin/zksync_core/src/sync_layer/cached_main_node_client.rs
@@ -1,18 +1,14 @@
 use std::{collections::HashMap, time::Instant};
 
-use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber, Transaction, U64};
+use zksync_types::{api::en::SyncBlock, MiniblockNumber, U64};
 use zksync_web3_decl::{
-    jsonrpsee::{
-        core::RpcResult,
-        http_client::{HttpClient, HttpClientBuilder},
-    },
-    namespaces::{EthNamespaceClient, ZksNamespaceClient},
+    jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
+    namespaces::{EnNamespaceClient, EthNamespaceClient},
+    RpcResult,
 };
 
 /// Maximum number of concurrent requests to the main node.
 const MAX_CONCURRENT_REQUESTS: usize = 100;
 
-/// Set of fields fetched together for a single miniblock.
-type MiniblockData = (BlockDetails, Option<(U64, U64)>, Vec<Transaction>);
 
 /// This is a temporary implementation of a cache layer for the main node HTTP requests.
/// It was introduced to quickly develop a way to fetch data from the main node concurrently,
@@ -33,9 +29,7 @@ pub(super) struct CachedMainNodeClient {
     /// Earliest miniblock number that is not yet cached.
     /// Used as a marker to refill the cache.
     next_refill_at: MiniblockNumber,
-    miniblock_headers: HashMap<MiniblockNumber, BlockDetails>,
-    batch_ranges: HashMap<L1BatchNumber, (U64, U64)>,
-    txs: HashMap<MiniblockNumber, Vec<Transaction>>,
+    blocks: HashMap<MiniblockNumber, SyncBlock>,
 }
 
 impl CachedMainNodeClient {
@@ -46,54 +40,20 @@ impl CachedMainNodeClient {
         Self {
             client,
             next_refill_at: MiniblockNumber(0),
-            miniblock_headers: Default::default(),
-            batch_ranges: Default::default(),
-            txs: Default::default(),
+            blocks: Default::default(),
         }
     }
 
-    /// Cached version of [`HttpClient::get_raw_block_transaction`].
-    pub async fn get_raw_block_transactions(
-        &self,
-        miniblock: MiniblockNumber,
-    ) -> RpcResult<Vec<Transaction>> {
-        let txs = { self.txs.get(&miniblock).cloned() };
-        metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_raw_block_transactions");
-        match txs {
-            Some(txs) => {
-                metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_raw_block_transactions");
-                Ok(txs)
+    /// Cached version of [`HttpClient::sync_l2_block`].
+    pub async fn sync_l2_block(&self, miniblock: MiniblockNumber) -> RpcResult<Option<SyncBlock>> {
+        let block = { self.blocks.get(&miniblock).cloned() };
+        metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "sync_l2_block");
+        match block {
+            Some(block) => {
+                metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "sync_l2_block");
+                Ok(Some(block))
             }
-            None => self.client.get_raw_block_transactions(miniblock).await,
-        }
-    }
-
-    /// Cached version of [`HttpClient::get_block_range`].
-    pub async fn get_block_details(
-        &self,
-        miniblock: MiniblockNumber,
-    ) -> RpcResult<Option<BlockDetails>> {
-        let block_details = self.miniblock_headers.get(&miniblock).cloned();
-        metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_block_details");
-        match block_details {
-            Some(block_details) => {
-                metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_block_details");
-                Ok(Some(block_details))
-            }
-            None => self.client.get_block_details(miniblock).await,
-        }
-    }
-
-    /// Cached version of [`HttpClient::get_miniblock_range`].
-    pub async fn get_miniblock_range(&self, batch: L1BatchNumber) -> RpcResult<Option<(U64, U64)>> {
-        let range = self.batch_ranges.get(&batch).cloned();
-        metrics::increment_counter!("external_node.fetcher.cache.total", "method" => "get_miniblock_range");
-        match range {
-            Some(range) => {
-                metrics::increment_counter!("external_node.fetcher.cache.hit", "method" => "get_miniblock_range");
-                Ok(Some(range))
-            }
-            None => self.client.get_miniblock_range(batch).await,
+            None => self.client.sync_l2_block(miniblock, true).await,
         }
     }
 
@@ -105,12 +65,7 @@ impl CachedMainNodeClient {
 
     /// Removes a miniblock's data from the cache.
     pub fn forget_miniblock(&mut self, miniblock: MiniblockNumber) {
-        self.miniblock_headers.remove(&miniblock);
-        self.txs.remove(&miniblock);
-    }
-
-    pub fn forget_l1_batch(&mut self, l1_batch: L1BatchNumber) {
-        self.batch_ranges.remove(&l1_batch);
+        self.blocks.remove(&miniblock);
    }
 
     pub async fn populate_miniblocks_cache(
@@ -133,19 +88,13 @@ impl CachedMainNodeClient {
                 // If the miniblock is already in the cache, we don't need to fetch it.
                !self.has_miniblock(miniblock)
             })
-            .map(|miniblock| Self::fetch_one_miniblock(&self.client, miniblock));
+            .map(|block_number| self.client.sync_l2_block(block_number, true));
 
         let results = futures::future::join_all(task_futures).await;
         for result in results {
-            if let Ok(Some((header, range, txs))) = result {
-                let miniblock = header.number;
-                let batch = header.l1_batch_number;
-                self.miniblock_headers.insert(miniblock, header);
-                if let Some(range) = range {
-                    self.batch_ranges.insert(batch, range);
-                }
-                self.txs.insert(miniblock, txs);
-                self.next_refill_at = self.next_refill_at.max(miniblock + 1);
+            if let Ok(Some(block)) = result {
+                self.next_refill_at = self.next_refill_at.max(block.number + 1);
+                self.blocks.insert(block.number, block);
             } else {
                 // At the cache level, it's fine to just silence errors.
                 // The entry won't be included in the cache, and whoever uses the cache will have to process
@@ -157,29 +106,6 @@
     }
 
     fn has_miniblock(&self, miniblock: MiniblockNumber) -> bool {
-        self.miniblock_headers.contains_key(&miniblock)
-    }
-
-    async fn fetch_one_miniblock(
-        client: &HttpClient,
-        miniblock: MiniblockNumber,
-    ) -> RpcResult<Option<MiniblockData>> {
-        // Error propagation here would mean that these entries won't appear in the cache.
-        // This would cause a cache miss, but generally it shouldn't be a problem as long as the API errors are rare.
-        // If the API returns lots of errors, that's a problem regardless of caching.
-        let start = Instant::now();
-        let header = client.get_block_details(miniblock).await;
-        metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_block_details");
-        let Some(header) = header? else { return Ok(None) };
-
-        let start = Instant::now();
-        let miniblock_range = client.get_miniblock_range(header.l1_batch_number).await?;
-        metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_miniblock_range");
-
-        let start = Instant::now();
-        let miniblock_txs = client.get_raw_block_transactions(miniblock).await?;
-        metrics::histogram!("external_node.fetcher.cache.requests", start.elapsed(), "stage" => "get_raw_block_transactions");
-
-        Ok(Some((header, miniblock_range, miniblock_txs)))
+        self.blocks.contains_key(&miniblock)
     }
 }
diff --git a/core/bin/zksync_core/src/sync_layer/external_io.rs b/core/bin/zksync_core/src/sync_layer/external_io.rs
index 8f5650ead78d..321b8d0d93bf 100644
--- a/core/bin/zksync_core/src/sync_layer/external_io.rs
+++ b/core/bin/zksync_core/src/sync_layer/external_io.rs
@@ -1,18 +1,20 @@
-use std::convert::TryFrom;
-use std::time::Duration;
+use std::{collections::HashMap, convert::TryFrom, iter::FromIterator, time::Duration};
 
 use super::genesis::fetch_system_contract_by_hash;
 use actix_rt::time::Instant;
+use async_trait::async_trait;
 use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode};
 use zksync_dal::ConnectionPool;
-use zksync_types::{l1::L1Tx, l2::L2Tx, L1BatchNumber, MiniblockNumber, Transaction, H256};
+use zksync_types::{
+    ethabi::Address, l1::L1Tx, l2::L2Tx, L1BatchNumber, L1BlockNumber, MiniblockNumber,
+    Transaction, H256, U256,
+};
 use zksync_utils::{be_words_to_bytes, bytes_to_be_words};
 
 use crate::state_keeper::{
     extractors,
     io::{
-        common::{l1_batch_params, load_pending_batch, poll_until, StateKeeperStats},
-        seal_logic::{seal_l1_batch_impl, seal_miniblock_impl},
+        common::{l1_batch_params, load_pending_batch, poll_iters},
         L1BatchParams, PendingBatchData, StateKeeperIO,
     },
     seal_criteria::SealerFn,
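Before the `ExternalIO` changes continue below, it's worth distilling what the `CachedMainNodeClient` above now does: a read-through cache over `SyncBlock`s plus a concurrent prefetch. The sketch below is a simplification under assumed names (`Block`, `fetch`); it is not the crate's API.

```rust
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Block(u32);

/// Stub standing in for the remote `sync_l2_block` RPC call.
async fn fetch(number: u32) -> Option<Block> {
    Some(Block(number))
}

struct Cache {
    blocks: HashMap<u32, Block>,
}

impl Cache {
    /// Read-through lookup: serve from memory when possible, otherwise go remote.
    async fn get_or_fetch(&self, number: u32) -> Option<Block> {
        match self.blocks.get(&number).cloned() {
            Some(block) => Some(block), // Cache hit: no network round-trip.
            None => fetch(number).await, // Miss: fall through to the main node.
        }
    }

    /// Concurrent prefetch: one future per missing block, awaited together.
    /// Failed fetches are silently dropped, exactly like in the code above:
    /// they simply surface later as cache misses.
    async fn prefetch(&mut self, numbers: impl Iterator<Item = u32>) {
        let missing: Vec<u32> = numbers.filter(|n| !self.blocks.contains_key(n)).collect();
        let results = futures::future::join_all(missing.into_iter().map(fetch)).await;
        for block in results.into_iter().flatten() {
            self.blocks.insert(block.0, block);
        }
    }
}
```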
@@ -76,27 +78,27 @@ impl ExternalNodeSealer {
 
 pub struct ExternalIO {
     pool: ConnectionPool,
 
-    // Grafana metrics
-    statistics: StateKeeperStats,
-
     current_l1_batch_number: L1BatchNumber,
     current_miniblock_number: MiniblockNumber,
     actions: ActionQueue,
     sync_state: SyncState,
     main_node_url: String,
+
+    /// Required to extract newly added tokens.
+    l2_erc20_bridge_addr: Address,
 }
 
 impl ExternalIO {
-    pub fn new(
+    pub async fn new(
         pool: ConnectionPool,
         actions: ActionQueue,
         sync_state: SyncState,
         main_node_url: String,
+        l2_erc20_bridge_addr: Address,
     ) -> Self {
-        let mut storage = pool.access_storage_blocking();
-        let last_sealed_block_header = storage.blocks_dal().get_newest_block_header();
-        let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number();
-        let num_contracts = storage.storage_load_dal().load_number_of_contracts();
+        let mut storage = pool.access_storage_tagged("sync_layer").await;
+        let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await;
+        let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await;
         drop(storage);
 
         vlog::info!(
@@ -109,21 +111,37 @@ impl ExternalIO {
         Self {
             pool,
-            statistics: StateKeeperStats { num_contracts },
             current_l1_batch_number: last_sealed_block_header.number + 1,
             current_miniblock_number: last_miniblock_number + 1,
             actions,
             sync_state,
             main_node_url,
+            l2_erc20_bridge_addr,
         }
     }
 
-    fn get_base_system_contract(&self, hash: H256) -> SystemContractCode {
+    async fn load_previous_l1_batch_hash(&self) -> U256 {
+        let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+
+        let stage_started_at: Instant = Instant::now();
+        let (hash, _) =
+            extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number)
+                .await;
+        metrics::histogram!(
+            "server.state_keeper.wait_for_prev_hash_time",
+            stage_started_at.elapsed()
+        );
+        hash
+    }
+
+    async fn get_base_system_contract(&self, hash: H256) -> SystemContractCode {
         let bytecode = self
             .pool
-            .access_storage_blocking()
+            .access_storage_tagged("sync_layer")
+            .await
             .storage_dal()
-            .get_factory_dep(hash);
+            .get_factory_dep(hash)
+            .await;
 
         match bytecode {
             Some(bytecode) => SystemContractCode {
@@ -132,27 +150,26 @@ impl ExternalIO {
             },
             None => {
                 let main_node_url = self.main_node_url.clone();
-                let contract = crate::block_on(async move {
-                    vlog::info!("Fetching base system contract bytecode from the main node");
-                    fetch_system_contract_by_hash(&main_node_url, hash)
-                        .await
-                        .expect("Failed to fetch base system contract bytecode from the main node")
-                });
+                vlog::info!("Fetching base system contract bytecode from the main node");
+                let contract = fetch_system_contract_by_hash(&main_node_url, hash)
+                    .await
+                    .expect("Failed to fetch base system contract bytecode from the main node");
                 self.pool
-                    .access_storage_blocking()
+                    .access_storage_tagged("sync_layer")
+                    .await
                     .storage_dal()
                     .insert_factory_deps(
                         self.current_miniblock_number,
-                        vec![(contract.hash, be_words_to_bytes(&contract.code))]
-                            .into_iter()
-                            .collect(),
-                    );
+                        &HashMap::from_iter([(contract.hash, be_words_to_bytes(&contract.code))]),
+                    )
+                    .await;
                 contract
             }
         }
     }
 }
 
+#[async_trait]
 impl StateKeeperIO for ExternalIO {
     fn current_l1_batch_number(&self) -> L1BatchNumber {
         self.current_l1_batch_number
@@ -162,12 +179,13 @@ impl StateKeeperIO for ExternalIO {
         self.current_miniblock_number
     }
 
-    fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
-        let mut storage = self.pool.access_storage_blocking();
+    async fn load_pending_batch(&mut self) -> Option<PendingBatchData> {
        let mut storage = self.pool.access_storage_tagged("sync_layer").await;
 
         let fee_account = storage
             .blocks_dal()
             .get_block_header(self.current_l1_batch_number - 1)
+            .await
             .unwrap_or_else(|| {
                 panic!(
                     "No block header for batch {}",
@@ -175,14 +193,14 @@ impl StateKeeperIO for ExternalIO {
                 )
             })
             .fee_account_address;
-        load_pending_batch(&mut storage, self.current_l1_batch_number, fee_account)
+        load_pending_batch(&mut storage, self.current_l1_batch_number, fee_account).await
     }
 
-    fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams> {
+    async fn wait_for_new_batch_params(&mut self, max_wait: Duration) -> Option<L1BatchParams> {
         vlog::debug!("Waiting for the new batch params");
-        poll_until(POLL_INTERVAL, max_wait, || {
-            match self.actions.pop_action()? {
-                SyncAction::OpenBatch {
+        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
+            match self.actions.pop_action() {
+                Some(SyncAction::OpenBatch {
                     number,
                     timestamp,
                     l1_gas_price,
@@ -193,36 +211,21 @@ impl StateKeeperIO for ExternalIO {
                         default_aa,
                     },
                     operator_address,
-                } => {
+                }) => {
                     assert_eq!(
                         number, self.current_l1_batch_number,
                         "Batch number mismatch"
                     );
-                    vlog::info!("Getting previous block hash");
-                    let previous_l1_batch_hash = {
-                        let mut storage = self.pool.access_storage_blocking();
-
-                        let stage_started_at: Instant = Instant::now();
-                        let hash = extractors::wait_for_prev_l1_batch_state_root_unchecked(
-                            &mut storage,
-                            self.current_l1_batch_number,
-                        );
-                        metrics::histogram!(
-                            "server.state_keeper.wait_for_prev_hash_time",
-                            stage_started_at.elapsed()
-                        );
-                        hash
-                    };
-                    vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash);
-
-                    vlog::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash);
+                    vlog::info!("Getting previous L1 batch hash");
+                    let previous_l1_batch_hash = self.load_previous_l1_batch_hash().await;
+                    vlog::info!("Previous L1 batch hash: {previous_l1_batch_hash}");
 
                     let base_system_contracts = BaseSystemContracts {
-                        bootloader: self.get_base_system_contract(bootloader),
-                        default_aa: self.get_base_system_contract(default_aa),
+                        bootloader: self.get_base_system_contract(bootloader).await,
+                        default_aa: self.get_base_system_contract(default_aa).await,
                     };
 
-                    Some(l1_batch_params(
+                    return Some(l1_batch_params(
                         number,
                         operator_address,
                         timestamp,
@@ -230,69 +233,84 @@ impl StateKeeperIO for ExternalIO {
                         l1_gas_price,
                         l2_fair_gas_price,
                         base_system_contracts,
-                    ))
+                    ));
                 }
-                other => {
+                Some(other) => {
                     panic!("Unexpected action in the action queue: {:?}", other);
                 }
+                None => {
+                    tokio::time::sleep(POLL_INTERVAL).await;
+                }
             }
-        })
+        }
+        None
     }
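The `poll_until` closure style above is replaced by a bounded loop. The general pattern, extracted, looks like the sketch below. Note that `poll_iters`'s exact rounding isn't visible in this hunk, so the version here (ceiling division, at least one attempt) is an assumption.

```rust
use std::time::Duration;

/// Assumed behavior: how many polling attempts fit into `max_wait`,
/// with at least one attempt. `interval` must be non-zero.
fn poll_iters(interval: Duration, max_wait: Duration) -> usize {
    let interval_ms = interval.as_millis().max(1);
    ((max_wait.as_millis() + interval_ms - 1) / interval_ms).max(1) as usize
}

/// Generic bounded poll: try, sleep, repeat; give up after `max_wait`.
async fn poll<T>(
    interval: Duration,
    max_wait: Duration,
    mut try_get: impl FnMut() -> Option<T>,
) -> Option<T> {
    for _ in 0..poll_iters(interval, max_wait) {
        if let Some(value) = try_get() {
            return Some(value);
        }
        tokio::time::sleep(interval).await;
    }
    None
}
```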
-    fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option<u64> {
+    async fn wait_for_new_miniblock_params(&mut self, max_wait: Duration) -> Option<u64> {
         // Wait for the next miniblock to appear in the queue.
-        poll_until(POLL_INTERVAL, max_wait, || {
-            match self.actions.peek_action()? {
-                SyncAction::Miniblock { number, timestamp } => {
+        let actions = &self.actions;
+        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
+            match actions.peek_action() {
+                Some(SyncAction::Miniblock { number, timestamp }) => {
                     self.actions.pop_action(); // We found the miniblock, remove it from the queue.
                     assert_eq!(
                         number, self.current_miniblock_number,
                         "Miniblock number mismatch"
                     );
-                    Some(timestamp)
+                    return Some(timestamp);
                 }
-                SyncAction::SealBatch => {
+                Some(SyncAction::SealBatch) => {
                     // We've reached the next batch, so this situation will be handled by the batch sealer.
                     // No need to pop the action from the queue.
                     // It also doesn't matter which timestamp we return, since there will be no more miniblocks in this
                     // batch. We return 0 to make it easy to detect if it ever appears somewhere.
-                    Some(0)
+                    return Some(0);
                 }
-                other => {
+                Some(other) => {
                     panic!(
                         "Unexpected action in the queue while waiting for the next miniblock {:?}",
                         other
                     );
                 }
+                _ => {
+                    tokio::time::sleep(POLL_INTERVAL).await;
+                    continue;
+                }
             }
-        })
+        }
+        None
     }
 
-    fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
+    async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
+        let actions = &self.actions;
         vlog::debug!(
             "Waiting for the new tx, next action is {:?}",
-            self.actions.peek_action()
+            actions.peek_action()
         );
-        poll_until(POLL_INTERVAL, max_wait, || {
+        for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
             // We keep polling until we get any item from the queue.
             // Once we have the item, it'll be either a transaction or a seal request.
             // Whatever item it is, we don't have to poll anymore and may exit.
-            match self.actions.peek_action()? {
-                SyncAction::Tx(_) => {
-                    let SyncAction::Tx(tx) = self.actions.pop_action().unwrap() else { unreachable!() };
-                    Some(Some(*tx))
+            match actions.peek_action() {
+                Some(SyncAction::Tx(_)) => {
+                    let SyncAction::Tx(tx) = actions.pop_action().unwrap() else { unreachable!() };
+                    return Some(*tx);
+                }
+                _ => {
+                    tokio::time::sleep(POLL_INTERVAL).await;
+                    continue;
                 }
-                _ => Some(None),
             }
-        })?
+        }
+        None
     }
 
-    fn rollback(&mut self, tx: &Transaction) {
+    async fn rollback(&mut self, tx: Transaction) {
         // We are replaying the already sealed batches, so no rollbacks are expected to occur.
         panic!("Rollback requested: {:?}", tx);
     }
 
-    fn reject(&mut self, tx: &Transaction, error: &str) {
+    async fn reject(&mut self, tx: &Transaction, error: &str) {
         // We are replaying the already executed transactions, so no rejections are expected to occur.
         panic!(
             "Reject requested because of the following error: {}.\n Transaction is: {:?}",
@@ -300,7 +318,7 @@ impl StateKeeperIO for ExternalIO {
         );
     }
 
-    fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) {
+    async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) {
         match self.actions.pop_action() {
             Some(SyncAction::SealMiniblock) => {}
             other => panic!(
@@ -309,24 +327,26 @@ impl StateKeeperIO for ExternalIO {
             ),
         };
 
-        let mut storage = self.pool.access_storage_blocking();
-        let mut transaction = storage.start_transaction_blocking();
+        let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+        let mut transaction = storage.start_transaction().await;
 
         let start = Instant::now();
         // We don't store the transactions in the database until they're executed to not overcomplicate the state
         // recovery on restart. So we have to store them here.
         for tx in updates_manager.miniblock.executed_transactions.iter() {
             if let Ok(l1_tx) = L1Tx::try_from(tx.transaction.clone()) {
-                // Using `Default` for `l1_block_number` is OK here, since it's only used to track the last processed
-                // L1 number in the `eth_watch` module.
+                let l1_block_number = L1BlockNumber(l1_tx.common_data.eth_block as u32);
+
                 transaction
                     .transactions_dal()
-                    .insert_transaction_l1(l1_tx, Default::default())
+                    .insert_transaction_l1(l1_tx, l1_block_number)
+                    .await;
             } else if let Ok(l2_tx) = L2Tx::try_from(tx.transaction.clone()) {
                 // Using `Default` for execution metrics should be OK here, since this data is not used on the EN.
                 transaction
                     .transactions_dal()
-                    .insert_transaction_l2(l2_tx, Default::default());
+                    .insert_transaction_l2(l2_tx, Default::default())
+                    .await;
             } else {
                 unreachable!("Transaction {:?} is neither L1 nor L2", tx.transaction);
             }
         }
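The loop just above persists each executed transaction by trying the L1 conversion first and falling back to L2. Below is a self-contained sketch of that dispatch; the toy `Tx`/`L1Tx`/`L2Tx` types here merely imitate the `TryFrom` impls that `zksync_types` provides.

```rust
#[derive(Clone, Debug)]
enum Tx {
    Priority(u64), // Imitates an L1-originated (priority) transaction.
    Regular(u64),  // Imitates an ordinary L2 transaction.
}

struct L1Tx(u64);
struct L2Tx(u64);

impl TryFrom<Tx> for L1Tx {
    type Error = ();
    fn try_from(tx: Tx) -> Result<Self, ()> {
        match tx {
            Tx::Priority(n) => Ok(L1Tx(n)),
            _ => Err(()),
        }
    }
}

impl TryFrom<Tx> for L2Tx {
    type Error = ();
    fn try_from(tx: Tx) -> Result<Self, ()> {
        match tx {
            Tx::Regular(n) => Ok(L2Tx(n)),
            _ => Err(()),
        }
    }
}

/// Mirrors the dispatch above: try L1 first, then L2; anything else is a bug.
fn classify(tx: Tx) -> &'static str {
    if L1Tx::try_from(tx.clone()).is_ok() {
        "insert as L1"
    } else if L2Tx::try_from(tx).is_ok() {
        "insert as L2"
    } else {
        unreachable!("Transaction is neither L1 nor L2");
    }
}
```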
@@ -338,15 +358,13 @@ impl StateKeeperIO for ExternalIO {
         );
 
         // Now transactions are stored, and we may mark them as executed.
-        seal_miniblock_impl(
-            self.current_miniblock_number,
+        let command = updates_manager.seal_miniblock_command(
             self.current_l1_batch_number,
-            &mut self.statistics,
-            &mut transaction,
-            updates_manager,
-            false,
+            self.current_miniblock_number,
+            self.l2_erc20_bridge_addr,
         );
-        transaction.commit_blocking();
+        command.seal(&mut transaction).await;
+        transaction.commit().await;
 
         self.sync_state
             .set_local_block(self.current_miniblock_number);
@@ -354,7 +372,7 @@ impl StateKeeperIO for ExternalIO {
         vlog::info!("Miniblock {} is sealed", self.current_miniblock_number);
     }
 
-    fn seal_l1_batch(
+    async fn seal_l1_batch(
         &mut self,
         block_result: vm::VmBlockResult,
         updates_manager: UpdatesManager,
@@ -368,16 +386,17 @@ impl StateKeeperIO for ExternalIO {
             ),
         };
 
-        let mut storage = self.pool.access_storage_blocking();
-        seal_l1_batch_impl(
-            self.current_miniblock_number,
-            self.current_l1_batch_number,
-            &mut self.statistics,
-            &mut storage,
-            block_result,
-            updates_manager,
-            block_context,
-        );
+        let mut storage = self.pool.access_storage_tagged("sync_layer").await;
+        updates_manager
+            .seal_l1_batch(
+                &mut storage,
+                self.current_miniblock_number,
+                self.current_l1_batch_number,
+                block_result,
+                block_context,
+                self.l2_erc20_bridge_addr,
+            )
+            .await;
 
         vlog::info!("Batch {} is sealed", self.current_l1_batch_number);
diff --git a/core/bin/zksync_core/src/sync_layer/fetcher.rs b/core/bin/zksync_core/src/sync_layer/fetcher.rs
index dc2cb3ab0e54..826aa45746c6 100644
--- a/core/bin/zksync_core/src/sync_layer/fetcher.rs
+++ b/core/bin/zksync_core/src/sync_layer/fetcher.rs
@@ -2,10 +2,11 @@ use std::time::{Duration, Instant};
 
 use tokio::sync::watch::Receiver;
 
-use crate::sync_layer::sync_action::{ActionQueue, BatchStatusChange, SyncAction};
+use crate::sync_layer::sync_action::{ActionQueue, SyncAction};
 use zksync_dal::ConnectionPool;
-use zksync_types::{explorer_api::BlockDetails, L1BatchNumber, MiniblockNumber};
-use zksync_web3_decl::jsonrpsee::core::{Error as RpcError, RpcResult};
+use zksync_types::{L1BatchNumber, MiniblockNumber};
+use zksync_web3_decl::jsonrpsee::core::Error as RpcError;
+use zksync_web3_decl::RpcResult;
 
 use super::{cached_main_node_client::CachedMainNodeClient, SyncState};
 
@@ -19,30 +20,26 @@ pub struct MainNodeFetcher {
     current_l1_batch: L1BatchNumber,
     current_miniblock: MiniblockNumber,
 
-    last_executed_l1_batch: L1BatchNumber,
-    last_proven_l1_batch: L1BatchNumber,
-    last_committed_l1_batch: L1BatchNumber,
-
     actions: ActionQueue,
     sync_state: SyncState,
     stop_receiver: Receiver<bool>,
 }
 
 impl MainNodeFetcher {
-    pub fn new(
+    pub async fn new(
         pool: ConnectionPool,
         main_node_url: &str,
         actions: ActionQueue,
         sync_state: SyncState,
         stop_receiver: Receiver<bool>,
     ) -> Self {
-        let mut storage = pool.access_storage_blocking();
-        let last_sealed_block_header = storage.blocks_dal().get_newest_block_header();
-        let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number();
+        let mut storage = pool.access_storage_tagged("sync_layer").await;
+        let last_sealed_block_header = storage.blocks_dal().get_newest_block_header().await;
+        let last_miniblock_number = storage.blocks_dal().get_sealed_miniblock_number().await;
 
        // It's important to know whether we have opened a new batch already or just sealed the previous one.
         // Depending on that, we must either insert an `OpenBatch` item into the queue or not.
-        let was_new_batch_open = storage.blocks_dal().pending_batch_exists();
+        let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await;
 
         // Miniblocks are always fully processed.
         let current_miniblock = last_miniblock_number + 1;
@@ -55,19 +52,6 @@ impl MainNodeFetcher {
             last_sealed_block_header.number
         };
 
-        let last_executed_l1_batch = storage
-            .blocks_dal()
-            .get_number_of_last_block_executed_on_eth()
-            .unwrap_or_default();
-        let last_proven_l1_batch = storage
-            .blocks_dal()
-            .get_number_of_last_block_proven_on_eth()
-            .unwrap_or_default();
-        let last_committed_l1_batch = storage
-            .blocks_dal()
-            .get_number_of_last_block_committed_on_eth()
-            .unwrap_or_default();
-
         let client = CachedMainNodeClient::build_client(main_node_url);
 
         Self {
@@ -75,10 +59,6 @@ impl MainNodeFetcher {
             current_l1_batch,
             current_miniblock,
 
-            last_executed_l1_batch,
-            last_proven_l1_batch,
-            last_committed_l1_batch,
-
             actions,
             sync_state,
             stop_receiver,
@@ -129,17 +109,20 @@ impl MainNodeFetcher {
             self.client
                 .populate_miniblocks_cache(self.current_miniblock, last_main_node_block)
                 .await;
-            if self.actions.has_action_capacity() {
+            let has_action_capacity = self.actions.has_action_capacity();
+            if has_action_capacity {
                 progressed |= self.fetch_next_miniblock().await?;
             }
-            if self.actions.has_status_change_capacity() {
-                progressed |= self.update_batch_statuses().await?;
-            }
 
             if !progressed {
                 // We didn't fetch any updates on this iteration, so we wait a bit to prevent a busy loop.
-                vlog::debug!("No updates to discover, waiting");
-                std::thread::sleep(DELAY_INTERVAL);
+                let log_message = if has_action_capacity {
+                    "No updates to discover, waiting for new blocks on the main node"
+                } else {
+                    "Local action queue is full, waiting for the state keeper to process the queue"
+                };
+                vlog::debug!("{log_message}");
+                tokio::time::sleep(DELAY_INTERVAL).await;
             }
         }
     }
 
@@ -150,9 +133,9 @@ impl MainNodeFetcher {
         let start = Instant::now();
 
         let request_start = Instant::now();
-        let Some(miniblock_header) = self
+        let Some(block) = self
             .client
-            .get_block_details(self.current_miniblock)
+            .sync_l2_block(self.current_miniblock)
             .await?
         else {
             return Ok(false);
@@ -160,96 +143,67 @@ impl MainNodeFetcher {
         metrics::histogram!(
             "external_node.fetcher.requests",
             request_start.elapsed(),
-            "stage" => "get_block_details",
+            "stage" => "sync_l2_block",
             "actor" => "miniblock_fetcher"
         );
 
         let mut new_actions = Vec::new();
-        if miniblock_header.l1_batch_number != self.current_l1_batch {
+        if block.l1_batch_number != self.current_l1_batch {
             assert_eq!(
-                miniblock_header.l1_batch_number,
+                block.l1_batch_number,
                 self.current_l1_batch.next(),
                 "Unexpected batch number in the next received miniblock"
             );
             vlog::info!(
                 "New batch: {}. Timestamp: {}",
-                miniblock_header.l1_batch_number,
-                miniblock_header.timestamp
+                block.l1_batch_number,
+                block.timestamp
             );
Timestamp: {}", - miniblock_header.l1_batch_number, - miniblock_header.timestamp + block.l1_batch_number, + block.timestamp ); new_actions.push(SyncAction::OpenBatch { - number: miniblock_header.l1_batch_number, - timestamp: miniblock_header.timestamp, - l1_gas_price: miniblock_header.l1_gas_price, - l2_fair_gas_price: miniblock_header.l2_fair_gas_price, - base_system_contracts_hashes: miniblock_header.base_system_contracts_hashes, - operator_address: miniblock_header.operator_address, + number: block.l1_batch_number, + timestamp: block.timestamp, + l1_gas_price: block.l1_gas_price, + l2_fair_gas_price: block.l2_fair_gas_price, + base_system_contracts_hashes: block.base_system_contracts_hashes, + operator_address: block.operator_address, }); - metrics::gauge!("external_node.fetcher.l1_batch", miniblock_header.l1_batch_number.0 as f64, "status" => "open"); - - self.client.forget_l1_batch(self.current_l1_batch); + metrics::gauge!("external_node.fetcher.l1_batch", block.l1_batch_number.0 as f64, "status" => "open"); self.current_l1_batch += 1; } else { // New batch implicitly means a new miniblock, so we only need to push the miniblock action // if it's not a new batch. new_actions.push(SyncAction::Miniblock { - number: miniblock_header.number, - timestamp: miniblock_header.timestamp, + number: block.number, + timestamp: block.timestamp, }); - metrics::gauge!( - "external_node.fetcher.miniblock", - miniblock_header.number.0 as f64 - ); + metrics::gauge!("external_node.fetcher.miniblock", block.number.0 as f64); } - let request_start = Instant::now(); - let miniblock_txs = self - .client - .get_raw_block_transactions(self.current_miniblock) - .await? - .into_iter() - .map(|tx| SyncAction::Tx(Box::new(tx))); - metrics::histogram!( - "external_node.fetcher.requests", - request_start.elapsed(), - "stage" => "get_raw_block_transactions", - "actor" => "miniblock_fetcher" - ); - + let txs: Vec = block + .transactions + .expect("Transactions are always requested"); metrics::counter!( "server.processed_txs", - miniblock_txs.len() as u64, + txs.len() as u64, "stage" => "mempool_added" ); - new_actions.extend(miniblock_txs); - - // Check if this was the last miniblock in the batch. - // If we will receive `None` here, it would mean that it's the currently open batch and it was not sealed - // after the current miniblock. - let request_start = Instant::now(); - let is_last_miniblock_of_batch = self - .client - .get_miniblock_range(self.current_l1_batch) - .await? - .map(|(_, last)| last.as_u32() == miniblock_header.number.0) - .unwrap_or(false); - metrics::histogram!( - "external_node.fetcher.requests", - request_start.elapsed(), - "stage" => "get_miniblock_range", - "actor" => "miniblock_fetcher" - ); + new_actions.extend(txs.into_iter().map(SyncAction::from)); // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. 
-
-    /// Goes through the already fetched batches trying to update their statuses.
-    /// Returns `true` if at least one batch was updated, and `false` otherwise.
-    async fn update_batch_statuses(&mut self) -> RpcResult<bool> {
-        let start = Instant::now();
-        assert!(
-            self.last_executed_l1_batch <= self.last_proven_l1_batch,
-            "Incorrect local state: executed batch must be proven"
-        );
-        assert!(
-            self.last_proven_l1_batch <= self.last_committed_l1_batch,
-            "Incorrect local state: proven batch must be committed"
-        );
-        assert!(
-            self.last_committed_l1_batch <= self.current_l1_batch,
-            "Incorrect local state: unkonwn batch marked as committed"
-        );
-
-        let mut applied_updates = false;
-        let mut batch = self.last_executed_l1_batch.next();
-        // In this loop we try to progress on the batch statuses, utilizing the same request to the node to potentially
-        // update all three statuses (e.g. if the node is still syncing), but also skipping the gaps in the statuses
-        // (e.g. if the last executed batch is 10, but the last proven is 20, we don't need to check the batches 11-19).
-        while batch <= self.current_l1_batch {
-            // While we may receive `None` for the `self.current_l1_batch`, it's OK: open batch is guaranteed to not
-            // be sent to L1.
-            let request_start = Instant::now();
-            let Some((start_miniblock, _)) = self.client.get_miniblock_range(batch).await? else {
-                return Ok(applied_updates);
-            };
-            metrics::histogram!(
-                "external_node.fetcher.requests",
-                request_start.elapsed(),
-                "stage" => "get_miniblock_range",
-                "actor" => "batch_status_fetcher"
-            );
-
-            // We could've used any miniblock from the range, all of them share the same info.
-            let request_start = Instant::now();
-            let Some(batch_info) = self
-                .client
-                .get_block_details(MiniblockNumber(start_miniblock.as_u32()))
-                .await?
-            else {
-                // We cannot recover from an external API inconsistency.
-                panic!(
-                    "Node API is inconsistent: miniblock {} was reported to be a part of {} L1batch, \
-                    but API has no information about this miniblock", start_miniblock, batch
-                );
-            };
-            metrics::histogram!(
-                "external_node.fetcher.requests",
-                request_start.elapsed(),
-                "stage" => "get_block_details",
-                "actor" => "batch_status_fetcher"
-            );
-
-            applied_updates |= self.update_committed_batch(&batch_info);
-            applied_updates |= self.update_proven_batch(&batch_info);
-            applied_updates |= self.update_executed_batch(&batch_info);
-
-            // Check whether we can skip a part of the range.
-            if batch_info.commit_tx_hash.is_none() {
-                // No committed batches after this one.
-                break;
-            } else if batch_info.prove_tx_hash.is_none() && batch < self.last_committed_l1_batch {
-                // The interval between this batch and the last committed one is not proven.
-                batch = self.last_committed_l1_batch.next();
-            } else if batch_info.executed_at.is_none() && batch < self.last_proven_l1_batch {
-                // The interval between this batch and the last proven one is not executed.
- batch = self.last_proven_l1_batch.next(); - } else { - batch += 1; - } - } - - metrics::histogram!("external_node.update_batch_statuses", start.elapsed()); - Ok(applied_updates) - } - - /// Returns `true` if batch info was updated. - fn update_committed_batch(&mut self, batch_info: &BlockDetails) -> bool { - if batch_info.commit_tx_hash.is_some() - && batch_info.l1_batch_number == self.last_committed_l1_batch.next() - { - assert!( - batch_info.committed_at.is_some(), - "Malformed API response: batch is committed, but has no commit timestamp" - ); - self.actions.push_commit_status_change(BatchStatusChange { - number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.commit_tx_hash.unwrap(), - happened_at: batch_info.committed_at.unwrap(), - }); - vlog::info!("Batch {}: committed", batch_info.l1_batch_number); - metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "committed"); - self.last_committed_l1_batch += 1; - true - } else { - false - } - } - - /// Returns `true` if batch info was updated. - fn update_proven_batch(&mut self, batch_info: &BlockDetails) -> bool { - if batch_info.prove_tx_hash.is_some() - && batch_info.l1_batch_number == self.last_proven_l1_batch.next() - { - assert!( - batch_info.proven_at.is_some(), - "Malformed API response: batch is proven, but has no prove timestamp" - ); - self.actions.push_prove_status_change(BatchStatusChange { - number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.prove_tx_hash.unwrap(), - happened_at: batch_info.proven_at.unwrap(), - }); - vlog::info!("Batch {}: proven", batch_info.l1_batch_number); - metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "proven"); - self.last_proven_l1_batch += 1; - true - } else { - false - } - } - - /// Returns `true` if batch info was updated. 
- fn update_executed_batch(&mut self, batch_info: &BlockDetails) -> bool { - if batch_info.execute_tx_hash.is_some() - && batch_info.l1_batch_number == self.last_executed_l1_batch.next() - { - assert!( - batch_info.executed_at.is_some(), - "Malformed API response: batch is executed, but has no execute timestamp" - ); - self.actions.push_execute_status_change(BatchStatusChange { - number: batch_info.l1_batch_number, - l1_tx_hash: batch_info.execute_tx_hash.unwrap(), - happened_at: batch_info.executed_at.unwrap(), - }); - vlog::info!("Batch {}: executed", batch_info.l1_batch_number); - metrics::gauge!("external_node.fetcher.l1_batch", batch_info.l1_batch_number.0 as f64, "status" => "executed"); - self.last_executed_l1_batch += 1; - true - } else { - false - } - } } diff --git a/core/bin/zksync_core/src/sync_layer/genesis.rs b/core/bin/zksync_core/src/sync_layer/genesis.rs index 2bb4dade8aa5..688b8973eb88 100644 --- a/core/bin/zksync_core/src/sync_layer/genesis.rs +++ b/core/bin/zksync_core/src/sync_layer/genesis.rs @@ -14,12 +14,12 @@ pub async fn perform_genesis_if_needed( base_system_contracts_hashes: BaseSystemContractsHashes, main_node_url: String, ) { - let mut transaction = storage.start_transaction_blocking(); + let mut transaction = storage.start_transaction().await; let genesis_block_hash = ensure_genesis_state( &mut transaction, zksync_chain_id, - GenesisParams::ExternalNode { + &GenesisParams::ExternalNode { base_system_contracts_hashes, main_node_url: main_node_url.clone(), }, @@ -27,7 +27,7 @@ pub async fn perform_genesis_if_needed( .await; validate_genesis_state(&main_node_url, genesis_block_hash).await; - transaction.commit_blocking(); + transaction.commit().await; } // When running an external node, we want to make sure we have the same @@ -55,10 +55,12 @@ pub async fn fetch_system_contract_by_hash( hash: H256, ) -> Result { let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - let bytecode = client - .get_bytecode_by_hash(hash) - .await? - .expect("Failed to get base system contract bytecode"); + let bytecode = client.get_bytecode_by_hash(hash).await?.unwrap_or_else(|| { + panic!( + "Base system contract bytecode is absent on the main node. Dependency hash: {:?}", + hash + ) + }); assert_eq!( hash, zksync_utils::bytecode::hash_bytecode(&bytecode), diff --git a/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs b/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs deleted file mode 100644 index ba649576a2df..000000000000 --- a/core/bin/zksync_core/src/sync_layer/mock_batch_executor.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! This module provide a mock batch executor that in fact does not execute any transactions. -//! This is a stub that is helpful for the development of the External Node, as it allows to -//! not focus on the execution of the transactions, but rather only care about the data flow between -//! the fetcher and the state keeper. -//! -//! This is temporary module which will be removed once EN binary is more or less ready. -//! It also has a fair amount of copy-paste from the state keeper tests, which is OK, given that this module -//! is temporary and otherwise we would've had to make the state keeper tests public. 
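For the record, the pattern this deleted mock relied on is a classic channel-driven command loop. A distilled, runnable version with simplified payloads (strings instead of VM results) is sketched below; a caller sends a command carrying a fresh response channel and blocks on the matching receiver, much like how `BatchExecutorHandle` talks to the real executor.

```rust
use std::sync::mpsc;
use std::thread;

enum Command {
    ExecuteTx(mpsc::Sender<&'static str>),
    FinishBatch(mpsc::Sender<&'static str>),
}

/// Spawns a worker that answers every command on its per-command channel,
/// and shuts down once the batch is finished.
fn spawn_mock_executor() -> mpsc::Sender<Command> {
    let (cmd_tx, cmd_rx) = mpsc::channel::<Command>();
    thread::spawn(move || loop {
        match cmd_rx.recv().unwrap() {
            Command::ExecuteTx(resp) => resp.send("success").unwrap(),
            Command::FinishBatch(resp) => {
                resp.send("block result").unwrap();
                break; // The batch is done; the worker thread exits.
            }
        }
    });
    cmd_tx
}
```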
-
-use std::sync::mpsc;
-
-use vm::{
-    vm::{VmPartialExecutionResult, VmTxExecutionResult},
-    VmBlockResult, VmExecutionResult,
-};
-use zksync_types::tx::tx_execution_info::TxExecutionStatus;
-use zksync_types::vm_trace::{VmExecutionTrace, VmTrace};
-
-use crate::state_keeper::{
-    batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult},
-    io::L1BatchParams,
-    types::ExecutionMetricsForCriteria,
-};
-
-#[derive(Debug)]
-pub struct MockBatchExecutorBuilder;
-
-impl L1BatchExecutorBuilder for MockBatchExecutorBuilder {
-    fn init_batch(&self, _l1_batch_params: L1BatchParams) -> BatchExecutorHandle {
-        let (tx, rx) = mpsc::channel::<Command>();
-        let responder_thread_handle = std::thread::spawn(move || loop {
-            let action = rx.recv().unwrap();
-            match action {
-                Command::ExecuteTx(_, resp) => {
-                    resp.send(successful_exec()).unwrap();
-                }
-                Command::RollbackLastTx(_resp) => {
-                    panic!("Rollback should never happen");
-                }
-                Command::FinishBatch(resp) => {
-                    // Blanket result, it doesn't really matter.
-                    let result = VmBlockResult {
-                        full_result: VmExecutionResult {
-                            events: Default::default(),
-                            storage_log_queries: Default::default(),
-                            used_contract_hashes: Default::default(),
-                            l2_to_l1_logs: Default::default(),
-                            return_data: Default::default(),
-                            gas_used: Default::default(),
-                            contracts_used: Default::default(),
-                            revert_reason: Default::default(),
-                            trace: VmTrace::ExecutionTrace(VmExecutionTrace::default()),
-                            total_log_queries: Default::default(),
-                            cycles_used: Default::default(),
-                            computational_gas_used: Default::default(),
-                        },
-                        block_tip_result: VmPartialExecutionResult {
-                            logs: Default::default(),
-                            revert_reason: Default::default(),
-                            contracts_used: Default::default(),
-                            cycles_used: Default::default(),
-                            computational_gas_used: Default::default(),
-                        },
-                    };
-
-                    resp.send(result).unwrap();
-                    break;
-                }
-            }
-        });
-
-        BatchExecutorHandle::from_raw(responder_thread_handle, tx)
-    }
-}
-
-fn partial_execution_result() -> VmPartialExecutionResult {
-    VmPartialExecutionResult {
-        logs: Default::default(),
-        revert_reason: Default::default(),
-        contracts_used: Default::default(),
-        cycles_used: Default::default(),
-        computational_gas_used: Default::default(),
-    }
-}
-
-/// Creates a `TxExecutionResult` object denoting a successful tx execution.
-pub(crate) fn successful_exec() -> TxExecutionResult { - TxExecutionResult::Success { - tx_result: Box::new(VmTxExecutionResult { - status: TxExecutionStatus::Success, - result: partial_execution_result(), - call_traces: vec![], - gas_refunded: 0, - operator_suggested_refund: 0, - }), - tx_metrics: ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }, - bootloader_dry_run_metrics: ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }, - bootloader_dry_run_result: Box::new(partial_execution_result()), - compressed_bytecodes: vec![], - } -} diff --git a/core/bin/zksync_core/src/sync_layer/mod.rs b/core/bin/zksync_core/src/sync_layer/mod.rs index 85faced646aa..77d6dac117a2 100644 --- a/core/bin/zksync_core/src/sync_layer/mod.rs +++ b/core/bin/zksync_core/src/sync_layer/mod.rs @@ -3,7 +3,6 @@ mod cached_main_node_client; pub mod external_io; pub mod fetcher; pub mod genesis; -pub mod mock_batch_executor; pub(crate) mod sync_action; mod sync_state; diff --git a/core/bin/zksync_core/src/sync_layer/sync_action.rs b/core/bin/zksync_core/src/sync_layer/sync_action.rs index 311db9dd0802..d11c14fba6d8 100644 --- a/core/bin/zksync_core/src/sync_layer/sync_action.rs +++ b/core/bin/zksync_core/src/sync_layer/sync_action.rs @@ -4,9 +4,8 @@ use std::{ time::Instant, }; -use chrono::{DateTime, Utc}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H256}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction}; /// Action queue is used to communicate between the fetcher and the rest of the external node /// by collecting the fetched data in memory until it gets processed by the different entities. @@ -46,19 +45,6 @@ impl ActionQueue { self.read_lock().actions.len() < ACTION_CAPACITY } - /// Returns true if the queue has capacity for a new status change. - /// Capacity is limited to avoid memory exhaustion. - pub(crate) fn has_status_change_capacity(&self) -> bool { - const STATUS_CHANGE_CAPACITY: usize = 8192; - - // We don't really care about any particular queue size, as the only intention - // of this check is to prevent memory exhaustion. - let read_lock = self.read_lock(); - read_lock.commit_status_changes.len() < STATUS_CHANGE_CAPACITY - && read_lock.prove_status_changes.len() < STATUS_CHANGE_CAPACITY - && read_lock.execute_status_changes.len() < STATUS_CHANGE_CAPACITY - } - /// Pushes a set of actions to the queue. /// /// Requires that the actions are in the correct order: starts with a new open batch/miniblock, @@ -75,64 +61,6 @@ impl ActionQueue { self.write_lock().actions.extend(actions); } - /// Pushes a notification about certain batch being committed. - pub(crate) fn push_commit_status_change(&self, change: BatchStatusChange) { - metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "commit"); - self.write_lock().commit_status_changes.push_back(change); - } - - /// Pushes a notification about certain batch being proven. - pub(crate) fn push_prove_status_change(&self, change: BatchStatusChange) { - metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "prove"); - self.write_lock().prove_status_changes.push_back(change); - } - - /// Pushes a notification about certain batch being executed. 
-    pub(crate) fn push_execute_status_change(&self, change: BatchStatusChange) {
-        metrics::increment_gauge!("external_node.action_queue.status_change_queue_size", 1_f64, "item" => "execute");
-        self.write_lock().execute_status_changes.push_back(change);
-    }
-
-    /// Collects all status changes and returns them.
-    pub(crate) fn take_status_changes(&self, last_sealed_batch: L1BatchNumber) -> StatusChanges {
-        fn drain(
-            queue: &mut VecDeque<BatchStatusChange>,
-            last_sealed_batch: L1BatchNumber,
-        ) -> Vec<BatchStatusChange> {
-            let range_end = queue
-                .iter()
-                .position(|change| change.number > last_sealed_batch)
-                .unwrap_or(queue.len());
-            queue.drain(..range_end).collect()
-        }
-
-        let mut write_lock = self.write_lock();
-
-        let result = StatusChanges {
-            commit: drain(&mut write_lock.commit_status_changes, last_sealed_batch),
-            prove: drain(&mut write_lock.prove_status_changes, last_sealed_batch),
-            execute: drain(&mut write_lock.execute_status_changes, last_sealed_batch),
-        };
-
-        metrics::gauge!(
-            "external_node.action_queue.status_change_queue_size",
-            write_lock.commit_status_changes.len() as f64,
-            "item" => "commit"
-        );
-        metrics::gauge!(
-            "external_node.action_queue.status_change_queue_size",
-            write_lock.prove_status_changes.len() as f64,
-            "item" => "prove"
-        );
-        metrics::gauge!(
-            "external_node.action_queue.status_change_queue_size",
-            write_lock.execute_status_changes.len() as f64,
-            "item" => "execute"
-        );
-
-        result
-    }
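The positional drain in the removed `take_status_changes` is worth pinning down, since it relies on the queue being ordered by batch number. Distilled (with `u32` in place of `BatchStatusChange`):

```rust
use std::collections::VecDeque;

/// Takes every change whose batch number is already sealed locally and
/// leaves the rest queued. Assumes the queue is sorted by batch number.
fn drain_sealed(queue: &mut VecDeque<u32>, last_sealed: u32) -> Vec<u32> {
    let range_end = queue
        .iter()
        .position(|&number| number > last_sealed)
        .unwrap_or(queue.len());
    queue.drain(..range_end).collect()
}

fn main() {
    let mut queue: VecDeque<u32> = [1, 2, 5].into_iter().collect();
    assert_eq!(drain_sealed(&mut queue, 3), vec![1, 2]);
    assert_eq!(queue, VecDeque::from([5])); // Batch 5 waits until it's sealed.
}
```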
-
     /// Checks whether the action sequence is valid.
     /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable
     /// error. This function itself does not panic for the ease of testing.
@@ -187,26 +115,9 @@ impl ActionQueue {
     }
 }
 
-#[derive(Debug)]
-pub(crate) struct StatusChanges {
-    pub(crate) commit: Vec<BatchStatusChange>,
-    pub(crate) prove: Vec<BatchStatusChange>,
-    pub(crate) execute: Vec<BatchStatusChange>,
-}
-
-impl StatusChanges {
-    /// Returns true if there are no status changes.
-    pub(crate) fn is_empty(&self) -> bool {
-        self.commit.is_empty() && self.prove.is_empty() && self.execute.is_empty()
-    }
-}
-
 #[derive(Debug, Default)]
 struct ActionQueueInner {
     actions: VecDeque<SyncAction>,
-    commit_status_changes: VecDeque<BatchStatusChange>,
-    prove_status_changes: VecDeque<BatchStatusChange>,
-    execute_status_changes: VecDeque<BatchStatusChange>,
 }
 
 /// An instruction for the ExternalIO to request a certain action from the state keeper.
@@ -234,18 +145,15 @@ pub(crate) enum SyncAction {
     SealBatch,
 }
 
-/// Represents a change in the batch status.
-/// It may be a batch being committed, proven or executed.
-#[derive(Debug)]
-pub(crate) struct BatchStatusChange {
-    pub(crate) number: L1BatchNumber,
-    pub(crate) l1_tx_hash: H256,
-    pub(crate) happened_at: DateTime<Utc>,
+impl From<Transaction> for SyncAction {
+    fn from(tx: Transaction) -> Self {
+        Self::Tx(Box::new(tx))
+    }
 }
 
 #[cfg(test)]
 mod tests {
-    use zksync_types::l2::L2Tx;
+    use zksync_types::{l2::L2Tx, H256};
 
     use super::*;
 
@@ -365,50 +273,4 @@ mod tests {
         );
     }
 }
-
-    fn batch_status_change(batch: u32) -> BatchStatusChange {
-        BatchStatusChange {
-            number: L1BatchNumber(batch),
-            l1_tx_hash: H256::default(),
-            happened_at: Utc::now(),
-        }
-    }
-
-    /// Checks that `ActionQueue::take_status_changes` correctly takes the status changes from the queue.
-    #[test]
-    fn take_status_changes() {
-        let queue = ActionQueue::new();
-        let taken = queue.take_status_changes(L1BatchNumber(1000));
-        assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty());
-
-        queue.push_commit_status_change(batch_status_change(1));
-        queue.push_prove_status_change(batch_status_change(1));
-
-        let taken = queue.take_status_changes(L1BatchNumber(0));
-        assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty());
-
-        let taken = queue.take_status_changes(L1BatchNumber(1));
-        assert!(taken.commit.len() == 1 && taken.prove.len() == 1 && taken.execute.is_empty());
-        // Changes are already taken.
-        let taken = queue.take_status_changes(L1BatchNumber(1));
-        assert!(taken.commit.is_empty() && taken.prove.is_empty() && taken.execute.is_empty());
-
-        // Test partial draining.
-        queue.push_commit_status_change(batch_status_change(2));
-        queue.push_commit_status_change(batch_status_change(3));
-        queue.push_commit_status_change(batch_status_change(4));
-        queue.push_prove_status_change(batch_status_change(2));
-        queue.push_prove_status_change(batch_status_change(3));
-        queue.push_execute_status_change(batch_status_change(1));
-        queue.push_execute_status_change(batch_status_change(2));
-        let taken = queue.take_status_changes(L1BatchNumber(3));
-        assert_eq!(taken.commit.len(), 2);
-        assert_eq!(taken.prove.len(), 2);
-        assert_eq!(taken.execute.len(), 2);
-
-        let taken = queue.take_status_changes(L1BatchNumber(4));
-        assert_eq!(taken.commit.len(), 1);
-        assert_eq!(taken.prove.len(), 0);
-        assert_eq!(taken.execute.len(), 0);
-    }
 }
diff --git a/core/bin/zksync_core/src/sync_layer/sync_state.rs b/core/bin/zksync_core/src/sync_layer/sync_state.rs
index 7fdb1f28cc40..6cac8bd70b76 100644
--- a/core/bin/zksync_core/src/sync_layer/sync_state.rs
+++ b/core/bin/zksync_core/src/sync_layer/sync_state.rs
@@ -85,7 +85,8 @@ impl SyncState {
                 .0
                 .checked_sub(local_block.0)
             else {
-                return (false, None);
+                // We're ahead of the main node; this situation is handled by the reorg detector.
+                return (true, Some(0));
             };
             (block_diff <= SYNC_MINIBLOCK_DELTA, Some(block_diff))
         } else {
@@ -136,6 +137,9 @@ mod tests {
         sync_state.set_main_node_block(MiniblockNumber(1));
         sync_state.set_local_block(MiniblockNumber(2));
         // ^ should not panic, as we defer the situation to the reorg detector.
+
+        // At the same time, we should consider ourselves synced unless `ReorgDetector` tells us otherwise.
+        assert!(sync_state.is_synced());
     }
 
     #[test]
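The `sync_state.rs` change above flips the semantics of the local-ahead-of-main-node case. In isolation the check now reads roughly as below (`SYNC_MINIBLOCK_DELTA`'s real value isn't shown in this diff; 10 is a placeholder):

```rust
/// Placeholder; the actual constant lives in sync_state.rs.
const SYNC_MINIBLOCK_DELTA: u32 = 10;

/// Returns (is_synced, lag). If the local block is ahead of the main node's,
/// we now report ourselves as synced with zero lag and let the reorg
/// detector deal with the divergence.
fn is_synced(main_node_block: u32, local_block: u32) -> (bool, Option<u32>) {
    let Some(diff) = main_node_block.checked_sub(local_block) else {
        return (true, Some(0));
    };
    (diff <= SYNC_MINIBLOCK_DELTA, Some(diff))
}
```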
diff --git a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs
index 64e66f87f9d6..0c772221feb5 100644
--- a/core/bin/zksync_core/src/witness_generator/basic_circuits.rs
+++ b/core/bin/zksync_core/src/witness_generator/basic_circuits.rs
@@ -1,8 +1,6 @@
-use std::cell::RefCell;
 use std::collections::hash_map::DefaultHasher;
 use std::collections::{HashMap, HashSet};
 use std::hash::{Hash, Hasher};
-use std::rc::Rc;
 use std::sync::Arc;
 use std::time::Instant;
 
@@ -16,10 +14,9 @@
 use vm::{memory::SimpleMemory, StorageOracle, MAX_CYCLES_FOR_TX};
 use zksync_config::configs::WitnessGeneratorConfig;
 use zksync_config::constants::BOOTLOADER_ADDRESS;
 use zksync_dal::ConnectionPool;
-use zksync_db_storage_provider::DbStorageProvider;
 use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject};
 use zksync_queued_job_processor::JobProcessor;
-use zksync_state::storage_view::StorageView;
+use zksync_state::{PostgresStorage, StorageView};
 use zksync_types::zkevm_test_harness::toolset::GeometryConfig;
 use zksync_types::{
     circuit::GEOMETRY_CONFIG,
@@ -64,19 +61,29 @@ pub struct BasicWitnessGeneratorJob {
 
 pub struct BasicWitnessGenerator {
     config: WitnessGeneratorConfig,
     object_store: Arc<dyn ObjectStore>,
+    connection_pool: ConnectionPool,
+    prover_connection_pool: ConnectionPool,
 }
 
 impl BasicWitnessGenerator {
-    pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self {
+    pub async fn new(
+        config: WitnessGeneratorConfig,
+        store_factory: &ObjectStoreFactory,
+        connection_pool: ConnectionPool,
+        prover_connection_pool: ConnectionPool,
+    ) -> Self {
         Self {
             config,
-            object_store: store_factory.create_store().into(),
+            object_store: store_factory.create_store().await.into(),
+            connection_pool,
+            prover_connection_pool,
         }
     }
 
-    fn process_job_sync(
-        object_store: &dyn ObjectStore,
+    async fn process_job_impl(
+        object_store: Arc<dyn ObjectStore>,
         connection_pool: ConnectionPool,
+        prover_connection_pool: ConnectionPool,
         basic_job: BasicWitnessGeneratorJob,
         started_at: Instant,
     ) -> Option<BasicCircuitArtifacts> {
@@ -95,10 +102,16 @@
                     block_number.0, blocks_proving_percentage
                 );
-                let mut storage = connection_pool.access_storage_blocking();
+                let mut storage = connection_pool.access_storage().await;
                 storage
+                    .blocks_dal()
+                    .set_skip_proof_for_l1_batch(block_number)
+                    .await;
+                let mut prover_storage = prover_connection_pool.access_storage().await;
+                prover_storage
                     .witness_generator_dal()
-                    .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits);
+                    .mark_witness_job_as_skipped(block_number, AggregationRound::BasicCircuits)
+                    .await;
                 return None;
             }
         }
@@ -110,14 +123,17 @@
             block_number.0
         );
 
-        Some(process_basic_circuits_job(
-            object_store,
-            config,
-            connection_pool,
-            started_at,
-            block_number,
-            job,
-        ))
+        Some(
+            process_basic_circuits_job(
+                object_store,
+                config,
+                connection_pool,
+                started_at,
+                block_number,
+                job,
+            )
+            .await,
+        )
     }
 }
 
@@ -130,63 +146,69 @@ impl JobProcessor for BasicWitnessGenerator {
     const SERVICE_NAME: &'static str = "basic_circuit_witness_generator";
 
-    async fn get_next_job(
-        &self,
-        connection_pool: ConnectionPool,
-    ) -> Option<(Self::JobId, Self::Job)> {
-        let mut connection = connection_pool.access_storage_blocking();
+    async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> {
+        let mut prover_connection = self.prover_connection_pool.access_storage().await;
        let last_l1_batch_to_process = self.config.last_l1_batch_to_process();
 
-        match connection
+        match prover_connection
             .witness_generator_dal()
             .get_next_basic_circuit_witness_job(
                 self.config.witness_generation_timeout(),
                 self.config.max_attempts,
                 last_l1_batch_to_process,
-            ) {
+            )
+            .await
+        {
             Some(metadata) => {
-                let job = get_artifacts(metadata.block_number, &*self.object_store);
+                let job = get_artifacts(metadata.block_number, &self.object_store).await;
                 Some((job.block_number, job))
             }
             None => None,
         }
     }
 
-    async fn save_failure(
-        &self,
-        connection_pool: ConnectionPool,
-        job_id: L1BatchNumber,
-        started_at: Instant,
-        error: String,
-    ) -> () {
-        connection_pool
-            .access_storage_blocking()
+    async fn save_failure(&self, job_id: L1BatchNumber, started_at: Instant, error: String) -> () {
+        let attempts = self
+            .prover_connection_pool
+            .access_storage()
+            .await
             .witness_generator_dal()
             .mark_witness_job_as_failed(
-                job_id,
                 AggregationRound::BasicCircuits,
+                job_id,
                 started_at.elapsed(),
                 error,
-                self.config.max_attempts,
-            );
+            )
+            .await;
+
+        if attempts >= self.config.max_attempts {
+            self.connection_pool
+                .access_storage()
+                .await
+                .blocks_dal()
+                .set_skip_proof_for_l1_batch(job_id)
+                .await;
+        }
     }
 
     #[allow(clippy::async_yields_async)]
     async fn process_job(
         &self,
-        connection_pool: ConnectionPool,
         job: BasicWitnessGeneratorJob,
         started_at: Instant,
     ) -> tokio::task::JoinHandle<Option<BasicCircuitArtifacts>> {
         let object_store = Arc::clone(&self.object_store);
-        tokio::task::spawn_blocking(move || {
-            Self::process_job_sync(&*object_store, connection_pool.clone(), job, started_at)
-        })
+        tokio::spawn(Self::process_job_impl(
+            object_store,
+            self.connection_pool.clone(),
+            self.prover_connection_pool.clone(),
+            job,
+            started_at,
+        ))
     }
 
     async fn save_result(
         &self,
-        connection_pool: ConnectionPool,
         job_id: L1BatchNumber,
         started_at: Instant,
         optional_artifacts: Option<BasicCircuitArtifacts>,
@@ -194,15 +216,15 @@ impl JobProcessor for BasicWitnessGenerator {
         match optional_artifacts {
             None => (),
             Some(artifacts) => {
-                let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store);
-                update_database(connection_pool, started_at, job_id, blob_urls);
+                let blob_urls = save_artifacts(job_id, artifacts, &self.object_store).await;
+                update_database(&self.prover_connection_pool, started_at, job_id, blob_urls).await;
             }
         }
     }
 }
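The new `save_failure` above does two things: it records the attempt and, only once the attempt budget is exhausted, marks the batch to skip proving. A sketch of that rule with stubbed storage calls (the function names below are placeholders, not the DAL API):

```rust
/// Stub: would persist the failure and return the updated attempt count.
async fn record_failed_attempt(_job_id: u32, _error: String) -> u32 {
    1
}

/// Stub: would flag the batch as `skip_proof` in the main database.
async fn skip_proof_for_batch(_job_id: u32) {}

async fn save_failure(job_id: u32, error: String, max_attempts: u32) {
    let attempts = record_failed_attempt(job_id, error).await;
    // Escalate only when retries are exhausted; earlier failures just retry.
    if attempts >= max_attempts {
        skip_proof_for_batch(job_id).await;
    }
}
```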
connection.start_transaction_blocking(); + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; - transaction.witness_generator_dal().create_aggregation_jobs( - block_number, - &blob_urls.basic_circuits_url, - &blob_urls.basic_circuits_inputs_url, - blob_urls.circuit_types_and_urls.len(), - &blob_urls.scheduler_witness_url, - ); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, - AggregationRound::BasicCircuits, - ); + transaction + .witness_generator_dal() + .create_aggregation_jobs( + block_number, + &blob_urls.basic_circuits_url, + &blob_urls.basic_circuits_inputs_url, + blob_urls.circuit_types_and_urls.len(), + &blob_urls.scheduler_witness_url, + ) + .await; + transaction + .prover_dal() + .insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::BasicCircuits, + ) + .await; transaction .witness_generator_dal() .mark_witness_job_as_successful( block_number, AggregationRound::BasicCircuits, started_at.elapsed(), - ); + ) + .await; - transaction.commit_blocking(); + transaction.commit().await; track_witness_generation_stage(started_at, AggregationRound::BasicCircuits); } -pub fn get_artifacts( +async fn get_artifacts( block_number: L1BatchNumber, object_store: &dyn ObjectStore, ) -> BasicWitnessGeneratorJob { - let job = object_store.get(block_number).unwrap(); + let job = object_store.get(block_number).await.unwrap(); BasicWitnessGeneratorJob { block_number, job } } -fn save_artifacts( +async fn save_artifacts( block_number: L1BatchNumber, artifacts: BasicCircuitArtifacts, object_store: &dyn ObjectStore, ) -> BlobUrls { let basic_circuits_url = object_store .put(block_number, &artifacts.basic_circuits) + .await .unwrap(); let basic_circuits_inputs_url = object_store .put(block_number, &artifacts.basic_circuits_inputs) + .await .unwrap(); let scheduler_witness_url = object_store .put(block_number, &artifacts.scheduler_witness) + .await .unwrap(); let circuit_types_and_urls = save_prover_input_artifacts( block_number, &artifacts.circuits, object_store, AggregationRound::BasicCircuits, - ); + ) + .await; BlobUrls { basic_circuits_url, basic_circuits_inputs_url, @@ -301,23 +335,26 @@ fn save_artifacts( // If making changes to this method, consider moving this logic to the DAL layer and make // `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`. 
-pub fn build_basic_circuits_witness_generator_input( +pub async fn build_basic_circuits_witness_generator_input( connection_pool: ConnectionPool, witness_merkle_input: PrepareBasicCircuitsJob, block_number: L1BatchNumber, ) -> BasicCircuitWitnessGeneratorInput { - let mut connection = connection_pool.access_storage_blocking(); + let mut connection = connection_pool.access_storage().await; let block_header = connection .blocks_dal() .get_block_header(block_number) + .await .unwrap(); let previous_block_header = connection .blocks_dal() .get_block_header(block_number - 1) + .await .unwrap(); let previous_block_hash = connection .blocks_dal() .get_block_state_root(block_number - 1) + .await .expect("cannot generate witness before the root hash is computed"); BasicCircuitWitnessGeneratorInput { block_number, @@ -330,8 +367,8 @@ pub fn build_basic_circuits_witness_generator_input( } } -pub fn generate_witness( - object_store: &dyn ObjectStore, +pub async fn generate_witness( + object_store: Arc<dyn ObjectStore>, config: WitnessGeneratorConfig, connection_pool: ConnectionPool, input: BasicCircuitWitnessGeneratorInput, @@ -340,22 +377,25 @@ pub fn generate_witness( BlockBasicCircuitsPublicInputs<Bn256>, SchedulerCircuitInstanceWitness<Bn256>, ) { - let mut connection = connection_pool.access_storage_blocking(); + let mut connection = connection_pool.access_storage().await; let header = connection .blocks_dal() .get_block_header(input.block_number) + .await .unwrap(); let bootloader_code_bytes = connection .storage_dal() .get_factory_dep(header.base_system_contracts_hashes.bootloader) + .await .expect("Bootloader bytecode should exist"); let bootloader_code = bytes_to_chunks(&bootloader_code_bytes); let account_bytecode_bytes = connection .storage_dal() .get_factory_dep(header.base_system_contracts_hashes.default_aa) + .await .expect("Default aa bytecode should exist"); let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); - let bootloader_contents = expand_bootloader_contents(input.initial_heap_content); + let bootloader_contents = expand_bootloader_contents(&input.initial_heap_content); let account_code_hash = h256_to_u256(header.base_system_contracts_hashes.default_aa); let hashes: HashSet<H256> = input @@ -366,7 +406,7 @@ pub fn generate_witness( .map(|hash| u256_to_h256(*hash)) .collect(); - let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes); + let mut used_bytecodes = connection.storage_dal().get_factory_deps(&hashes).await; if input.used_bytecodes_hashes.contains(&account_code_hash) { used_bytecodes.insert(account_code_hash, account_bytecode); } @@ -383,66 +423,79 @@ pub fn generate_witness( let (_, last_miniblock_number) = connection .blocks_dal() .get_miniblock_range_of_l1_batch(input.block_number - 1) + .await .expect("L1 batch should contain at least one miniblock"); - let db_storage_provider = DbStorageProvider::new(connection, last_miniblock_number, true); - let mut tree = PrecalculatedMerklePathsProvider::new( - input.merkle_paths_input, - input.previous_block_hash.0, - ); - let storage_ptr: &mut dyn vm::storage::Storage = &mut StorageView::new(db_storage_provider); - let storage_oracle: StorageOracle = - StorageOracle::new(Rc::new(RefCell::new(storage_ptr))); - let memory: SimpleMemory = SimpleMemory::default(); - let mut hasher = DefaultHasher::new(); - GEOMETRY_CONFIG.hash(&mut hasher); - vlog::info!( - "generating witness for block {} using geometry config hash: {}", - input.block_number.0, - hasher.finish() - ); - if config - .dump_arguments_for_blocks
.contains(&input.block_number.0) - { - save_run_with_fixed_params_args_to_gcs( - object_store, + drop(connection); + let rt_handle = tokio::runtime::Handle::current(); + + // The following part is CPU-heavy, so we move it to a separate thread. + tokio::task::spawn_blocking(move || { + let connection = rt_handle.block_on(connection_pool.access_storage()); + let storage = + PostgresStorage::new(rt_handle.clone(), connection, last_miniblock_number, true); + let mut tree = PrecalculatedMerklePathsProvider::new( + input.merkle_paths_input, + input.previous_block_hash.0, + ); + + let storage_view = &mut StorageView::new(storage); + let storage_oracle: StorageOracle = + StorageOracle::new(storage_view.as_ptr()); + let memory: SimpleMemory = SimpleMemory::default(); + let mut hasher = DefaultHasher::new(); + GEOMETRY_CONFIG.hash(&mut hasher); + vlog::info!( + "generating witness for block {} using geometry config hash: {}", input.block_number.0, - last_miniblock_number.0, + hasher.finish() + ); + + if config + .dump_arguments_for_blocks + .contains(&input.block_number.0) + { + rt_handle.block_on(save_run_with_fixed_params_args_to_gcs( + object_store, + input.block_number.0, + last_miniblock_number.0, + Address::zero(), + BOOTLOADER_ADDRESS, + bootloader_code.clone(), + bootloader_contents.clone(), + false, + account_code_hash, + used_bytecodes.clone(), + Vec::default(), + MAX_CYCLES_FOR_TX as usize, + GEOMETRY_CONFIG, + tree.clone(), + )); + } + + zksync_types::zkevm_test_harness::external_calls::run_with_fixed_params( Address::zero(), BOOTLOADER_ADDRESS, - bootloader_code.clone(), - bootloader_contents.clone(), + bootloader_code, + bootloader_contents, false, account_code_hash, - used_bytecodes.clone(), + used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, GEOMETRY_CONFIG, - tree.clone(), - ); - } - - zksync_types::zkevm_test_harness::external_calls::run_with_fixed_params( - Address::zero(), - BOOTLOADER_ADDRESS, - bootloader_code, - bootloader_contents, - false, - account_code_hash, - used_bytecodes, - Vec::default(), - MAX_CYCLES_FOR_TX as usize, - GEOMETRY_CONFIG, - storage_oracle, - memory, - &mut tree, - ) + storage_oracle, + memory, + &mut tree, + ) + }) + .await + .unwrap() } #[allow(clippy::too_many_arguments)] -fn save_run_with_fixed_params_args_to_gcs( - object_store: &dyn ObjectStore, +async fn save_run_with_fixed_params_args_to_gcs( + object_store: Arc<dyn ObjectStore>, l1_batch_number: u32, last_miniblock_number: u32, caller: Address, @@ -474,6 +527,7 @@ fn save_run_with_fixed_params_args_to_gcs( }; object_store .put(L1BatchNumber(l1_batch_number), &run_with_fixed_params_input) + .await .unwrap(); } diff --git a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs index 8298d8847f27..e41786353e41 100644 --- a/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/leaf_aggregation.rs @@ -48,13 +48,22 @@ pub struct LeafAggregationWitnessGeneratorJob { pub struct LeafAggregationWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box<dyn ObjectStore>, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, } impl LeafAggregationWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + config: WitnessGeneratorConfig, + store_factory: &ObjectStoreFactory, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { config, - object_store:
store_factory.create_store(), + object_store: store_factory.create_store().await, + connection_pool, + prover_connection_pool, } } @@ -81,51 +90,54 @@ impl JobProcessor for LeafAggregationWitnessGenerator { const SERVICE_NAME: &'static str = "leaf_aggregation_witness_generator"; - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - match connection + match prover_connection .witness_generator_dal() .get_next_leaf_aggregation_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, - ) { + ) + .await + { Some(metadata) => { - let job = get_artifacts(metadata, &*self.object_store); + let job = get_artifacts(metadata, &*self.object_store).await; Some((job.block_number, job)) } None => None, } } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) -> () { - connection_pool - .access_storage_blocking() + async fn save_failure(&self, job_id: L1BatchNumber, started_at: Instant, error: String) -> () { + let attempts = self + .prover_connection_pool + .access_storage() + .await .witness_generator_dal() .mark_witness_job_as_failed( - job_id, AggregationRound::LeafAggregation, + job_id, started_at.elapsed(), error, - self.config.max_attempts, - ); + ) + .await; + + if attempts >= self.config.max_attempts { + self.connection_pool + .access_storage() + .await + .blocks_dal() + .set_skip_proof_for_l1_batch(job_id) + .await; + } } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: LeafAggregationWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle<LeafAggregationArtifacts> { @@ -134,20 +146,20 @@ impl JobProcessor for LeafAggregationWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, job_id: L1BatchNumber, started_at: Instant, artifacts: LeafAggregationArtifacts, ) { let leaf_circuits_len = artifacts.leaf_circuits.len(); - let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store); + let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store).await; update_database( - connection_pool, + &self.prover_connection_pool, started_at, job_id, leaf_circuits_len, blob_urls, - ); + ) + .await; } } @@ -212,15 +224,15 @@ pub fn process_leaf_aggregation_job( } } -fn update_database( - connection_pool: ConnectionPool, +async fn update_database( + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, leaf_circuits_len: usize, blob_urls: BlobUrls, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; // inserts artifacts into the node_aggregation_witness_jobs table // and advances it to waiting_for_proofs status @@ -231,30 +243,35 @@ fn update_database( leaf_circuits_len, &blob_urls.leaf_layer_subqueues_url, &blob_urls.aggregation_outputs_url, - ); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, - AggregationRound::LeafAggregation, -
) + .await; + transaction + .prover_dal() + .insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::LeafAggregation, + ) + .await; transaction .witness_generator_dal() .mark_witness_job_as_successful( block_number, AggregationRound::LeafAggregation, started_at.elapsed(), - ); + ) + .await; - transaction.commit_blocking(); + transaction.commit().await; track_witness_generation_stage(started_at, AggregationRound::LeafAggregation); } -pub fn get_artifacts( +async fn get_artifacts( metadata: WitnessGeneratorJobMetadata, object_store: &dyn ObjectStore, ) -> LeafAggregationWitnessGeneratorJob { - let basic_circuits = object_store.get(metadata.block_number).unwrap(); - let basic_circuits_inputs = object_store.get(metadata.block_number).unwrap(); + let basic_circuits = object_store.get(metadata.block_number).await.unwrap(); + let basic_circuits_inputs = object_store.get(metadata.block_number).await.unwrap(); LeafAggregationWitnessGeneratorJob { block_number: metadata.block_number, @@ -266,23 +283,26 @@ pub fn get_artifacts( } } -fn save_artifacts( +async fn save_artifacts( block_number: L1BatchNumber, artifacts: LeafAggregationArtifacts, object_store: &dyn ObjectStore, ) -> BlobUrls { let leaf_layer_subqueues_url = object_store .put(block_number, &artifacts.leaf_layer_subqueues) + .await .unwrap(); let aggregation_outputs_url = object_store .put(block_number, &artifacts.aggregation_outputs) + .await .unwrap(); let circuit_types_and_urls = save_prover_input_artifacts( block_number, &artifacts.leaf_circuits, object_store, AggregationRound::LeafAggregation, - ); + ) + .await; BlobUrls { leaf_layer_subqueues_url, aggregation_outputs_url, diff --git a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs index 19bdf4d7d126..0f7f67d982ca 100644 --- a/core/bin/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/bin/zksync_core/src/witness_generator/node_aggregation.rs @@ -55,13 +55,22 @@ pub struct NodeAggregationWitnessGeneratorJob { pub struct NodeAggregationWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box<dyn ObjectStore>, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, } impl NodeAggregationWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + config: WitnessGeneratorConfig, + store_factory: &ObjectStoreFactory, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { config, - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, + connection_pool, + prover_connection_pool, } } @@ -89,51 +98,54 @@ impl JobProcessor for NodeAggregationWitnessGenerator { const SERVICE_NAME: &'static str = "node_aggregation_witness_generator"; - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut prover_connection = self.prover_connection_pool.access_storage().await; let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - match connection + match prover_connection .witness_generator_dal() .get_next_node_aggregation_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, - ) { + ) + .await + { Some(metadata) => { - let job =
get_artifacts(metadata, &*self.object_store); + let job = get_artifacts(metadata, &*self.object_store).await; return Some((job.block_number, job)); } None => None, } } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) -> () { - connection_pool - .access_storage_blocking() + async fn save_failure(&self, job_id: L1BatchNumber, started_at: Instant, error: String) -> () { + let attempts = self + .prover_connection_pool + .access_storage() + .await .witness_generator_dal() .mark_witness_job_as_failed( - job_id, AggregationRound::NodeAggregation, + job_id, started_at.elapsed(), error, - self.config.max_attempts, - ); + ) + .await; + + if attempts >= self.config.max_attempts { + self.connection_pool + .access_storage() + .await + .blocks_dal() + .set_skip_proof_for_l1_batch(job_id) + .await; + } } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: NodeAggregationWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle<NodeAggregationArtifacts> { @@ -142,13 +154,12 @@ impl JobProcessor for NodeAggregationWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, job_id: L1BatchNumber, started_at: Instant, artifacts: NodeAggregationArtifacts, ) { - let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store); - update_database(connection_pool, started_at, job_id, blob_urls); + let blob_urls = save_artifacts(job_id, artifacts, &*self.object_store).await; + update_database(&self.prover_connection_pool, started_at, job_id, blob_urls).await; } } @@ -259,46 +270,53 @@ pub fn process_node_aggregation_job( } } -fn update_database( - connection_pool: ConnectionPool, +async fn update_database( + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, blob_urls: BlobUrls, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; // inserts artifacts into the scheduler_witness_jobs table // and advances it to waiting_for_proofs status transaction .witness_generator_dal() - .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url); - transaction.prover_dal().insert_prover_jobs( - block_number, - blob_urls.circuit_types_and_urls, - AggregationRound::NodeAggregation, - ); + .save_node_aggregation_artifacts(block_number, &blob_urls.node_aggregations_url) + .await; + transaction + .prover_dal() + .insert_prover_jobs( + block_number, + blob_urls.circuit_types_and_urls, + AggregationRound::NodeAggregation, + ) + .await; transaction .witness_generator_dal() .mark_witness_job_as_successful( block_number, AggregationRound::NodeAggregation, started_at.elapsed(), - ); + ) + .await; - transaction.commit_blocking(); + transaction.commit().await; track_witness_generation_stage(started_at, AggregationRound::NodeAggregation); } -pub fn get_artifacts( +async fn get_artifacts( metadata: WitnessGeneratorJobMetadata, object_store: &dyn ObjectStore, ) -> NodeAggregationWitnessGeneratorJob { let leaf_layer_subqueues = object_store .get(metadata.block_number) + .await .expect("leaf_layer_subqueues not found in queued `node_aggregation_witness_jobs` job"); let aggregation_outputs = object_store .get(metadata.block_number) + .await + .expect("aggregation_outputs not found in queued
`node_aggregation_witness_jobs` job"); NodeAggregationWitnessGeneratorJob { @@ -311,20 +329,22 @@ pub fn get_artifacts( } } -fn save_artifacts( +async fn save_artifacts( block_number: L1BatchNumber, artifacts: NodeAggregationArtifacts, object_store: &dyn ObjectStore, ) -> BlobUrls { let node_aggregations_url = object_store .put(block_number, &artifacts.final_node_aggregation) + .await .unwrap(); let circuit_types_and_urls = save_prover_input_artifacts( block_number, &artifacts.node_circuits, object_store, AggregationRound::NodeAggregation, - ); + ) + .await; BlobUrls { node_aggregations_url, circuit_types_and_urls, diff --git a/core/bin/zksync_core/src/witness_generator/scheduler.rs b/core/bin/zksync_core/src/witness_generator/scheduler.rs index 97f973b5b41d..377c05ec4635 100644 --- a/core/bin/zksync_core/src/witness_generator/scheduler.rs +++ b/core/bin/zksync_core/src/witness_generator/scheduler.rs @@ -43,13 +43,22 @@ pub struct SchedulerWitnessGeneratorJob { pub struct SchedulerWitnessGenerator { config: WitnessGeneratorConfig, object_store: Box, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, } impl SchedulerWitnessGenerator { - pub fn new(config: WitnessGeneratorConfig, store_factory: &ObjectStoreFactory) -> Self { + pub async fn new( + config: WitnessGeneratorConfig, + store_factory: &ObjectStoreFactory, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + ) -> Self { Self { config, - object_store: store_factory.create_store(), + object_store: store_factory.create_store().await, + connection_pool, + prover_connection_pool, } } @@ -76,24 +85,25 @@ impl JobProcessor for SchedulerWitnessGenerator { const SERVICE_NAME: &'static str = "scheduler_witness_generator"; - async fn get_next_job( - &self, - connection_pool: ConnectionPool, - ) -> Option<(Self::JobId, Self::Job)> { - let mut connection = connection_pool.access_storage_blocking(); + async fn get_next_job(&self) -> Option<(Self::JobId, Self::Job)> { + let mut connection = self.connection_pool.access_storage().await; + let mut prover_connection = self.prover_connection_pool.access_storage().await; let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - match connection + match prover_connection .witness_generator_dal() .get_next_scheduler_witness_job( self.config.witness_generation_timeout(), self.config.max_attempts, last_l1_batch_to_process, - ) { + ) + .await + { Some(metadata) => { let prev_metadata = connection .blocks_dal() - .get_block_metadata(metadata.block_number - 1); + .get_block_metadata(metadata.block_number - 1) + .await; let previous_aux_hash = prev_metadata .as_ref() .map_or([0u8; 32], |e| e.metadata.aux_data_hash.0); @@ -104,36 +114,41 @@ impl JobProcessor for SchedulerWitnessGenerator { previous_aux_hash, previous_meta_hash, &*self.object_store, - ); + ) + .await; Some((job.block_number, job)) } None => None, } } - async fn save_failure( - &self, - connection_pool: ConnectionPool, - job_id: L1BatchNumber, - started_at: Instant, - error: String, - ) -> () { - connection_pool - .access_storage_blocking() + async fn save_failure(&self, job_id: L1BatchNumber, started_at: Instant, error: String) -> () { + let attempts = self + .prover_connection_pool + .access_storage() + .await .witness_generator_dal() .mark_witness_job_as_failed( - job_id, AggregationRound::Scheduler, + job_id, started_at.elapsed(), error, - self.config.max_attempts, - ); + ) + .await; + + if attempts >= self.config.max_attempts { + self.connection_pool + .access_storage() + 
.await + .blocks_dal() + .set_skip_proof_for_l1_batch(job_id) + .await; + } } #[allow(clippy::async_yields_async)] async fn process_job( &self, - _connection_pool: ConnectionPool, job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle<SchedulerArtifacts> { @@ -142,20 +157,21 @@ impl JobProcessor for SchedulerWitnessGenerator { async fn save_result( &self, - connection_pool: ConnectionPool, job_id: L1BatchNumber, started_at: Instant, artifacts: SchedulerArtifacts, ) { let circuit_types_and_urls = - save_artifacts(job_id, &artifacts.scheduler_circuit, &*self.object_store); + save_artifacts(job_id, &artifacts.scheduler_circuit, &*self.object_store).await; update_database( - connection_pool, + &self.connection_pool, + &self.prover_connection_pool, started_at, job_id, artifacts.final_aggregation_result, circuit_types_and_urls, - ); + ) + .await; } } @@ -230,18 +246,19 @@ pub fn process_scheduler_job( } } -pub fn update_database( - connection_pool: ConnectionPool, +pub async fn update_database( + connection_pool: &ConnectionPool, + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, final_aggregation_result: BlockApplicationWitness, circuit_types_and_urls: Vec<(&'static str, String)>, ) { - let mut connection = connection_pool.access_storage_blocking(); - let mut transaction = connection.start_transaction_blocking(); - let block = transaction + let mut connection = connection_pool.access_storage().await; + let block = connection .blocks_dal() .get_block_metadata(block_number) + .await .expect("L1 batch should exist"); assert_eq!( @@ -264,18 +281,24 @@ pub fn update_database( "Commitment is wrong" ); - transaction.prover_dal().insert_prover_jobs( - block_number, - circuit_types_and_urls, - AggregationRound::Scheduler, - ); + let mut prover_connection = prover_connection_pool.access_storage().await; + let mut transaction = prover_connection.start_transaction().await; + transaction + .prover_dal() + .insert_prover_jobs( + block_number, + circuit_types_and_urls, + AggregationRound::Scheduler, + ) + .await; transaction .witness_generator_dal() .save_final_aggregation_result( block_number, final_aggregation_result.aggregation_result_coords, - ); + ) + .await; transaction .witness_generator_dal() @@ -283,13 +306,14 @@ pub fn update_database( block_number, AggregationRound::Scheduler, started_at.elapsed(), - ); + ) + .await; - transaction.commit_blocking(); + transaction.commit().await; track_witness_generation_stage(started_at, AggregationRound::Scheduler); } -pub fn save_artifacts( +async fn save_artifacts( block_number: L1BatchNumber, scheduler_circuit: &ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>, object_store: &dyn ObjectStore, @@ -300,16 +324,17 @@ pub fn save_artifacts( object_store, AggregationRound::Scheduler, ) + .await } -pub fn get_artifacts( +async fn get_artifacts( metadata: WitnessGeneratorJobMetadata, previous_aux_hash: [u8; 32], previous_meta_hash: [u8; 32], object_store: &dyn ObjectStore, ) -> SchedulerWitnessGeneratorJob { - let scheduler_witness = object_store.get(metadata.block_number).unwrap(); - let final_node_aggregations = object_store.get(metadata.block_number).unwrap(); + let scheduler_witness = object_store.get(metadata.block_number).await.unwrap(); + let final_node_aggregations = object_store.get(metadata.block_number).await.unwrap(); SchedulerWitnessGeneratorJob { block_number: metadata.block_number, diff --git a/core/bin/zksync_core/src/witness_generator/utils.rs b/core/bin/zksync_core/src/witness_generator/utils.rs index 4c67c4f3644a..da2a2141d06e
100644 --- a/core/bin/zksync_core/src/witness_generator/utils.rs +++ b/core/bin/zksync_core/src/witness_generator/utils.rs @@ -6,7 +6,7 @@ use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; use zksync_types::USED_BOOTLOADER_MEMORY_BYTES; use zksync_types::{proofs::AggregationRound, L1BatchNumber}; -pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec<u8> { +pub fn expand_bootloader_contents(packed: &[(usize, U256)]) -> Vec<u8> { let mut result: Vec<u8> = Vec::new(); result.resize(USED_BOOTLOADER_MEMORY_BYTES, 0); @@ -17,25 +17,24 @@ pub fn expand_bootloader_contents(packed: Vec<(usize, U256)>) -> Vec<u8> { result.to_vec() } -pub fn save_prover_input_artifacts( +pub async fn save_prover_input_artifacts( block_number: L1BatchNumber, circuits: &[ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>], object_store: &dyn ObjectStore, aggregation_round: AggregationRound, ) -> Vec<(&'static str, String)> { - let types_and_urls = circuits - .iter() - .enumerate() - .map(|(sequence_number, circuit)| { - let circuit_type = circuit.short_description(); - let circuit_key = CircuitKey { - block_number, - sequence_number, - circuit_type, - aggregation_round, - }; - let blob_url = object_store.put(circuit_key, circuit).unwrap(); - (circuit_type, blob_url) - }); - types_and_urls.collect() + // We intentionally process circuits sequentially to not overwhelm the object store. + let mut types_and_urls = Vec::with_capacity(circuits.len()); + for (sequence_number, circuit) in circuits.iter().enumerate() { + let circuit_type = circuit.short_description(); + let circuit_key = CircuitKey { + block_number, + sequence_number, + circuit_type, + aggregation_round, + }; + let blob_url = object_store.put(circuit_key, circuit).await.unwrap(); + types_and_urls.push((circuit_type, blob_url)); + } + types_and_urls } diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index e177e30558b1..fcd4ecbdc8e8 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index b2c69a4300e1..17eb3a95add6 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/circuit_breaker/src/facet_selectors.rs b/core/lib/circuit_breaker/src/facet_selectors.rs index 03d1590e67f2..eca1ff1cc81f 100644 --- a/core/lib/circuit_breaker/src/facet_selectors.rs +++ b/core/lib/circuit_breaker/src/facet_selectors.rs @@ -3,8 +3,9 @@ use convert_case::{Case, Casing}; use std::{collections::BTreeMap, env, fmt, fs, path::Path, str::FromStr}; use zksync_config::configs::chain::CircuitBreakerConfig; -use zksync_eth_client::{types::Error as EthClientError, BoundEthInterface}; -use zksync_types::{ethabi::Token, Address}; +use zksync_contracts::zksync_contract; +use zksync_eth_client::{types::Error as EthClientError, EthInterface}; +use zksync_types::{ethabi::Token, Address, H160}; // local imports use crate::{utils::unwrap_tuple, CircuitBreaker,
CircuitBreakerError}; @@ -31,10 +32,11 @@ pub struct FacetSelectorsChecker { // BTreeMap is used to have fixed order of elements when printing error. server_selectors: BTreeMap<Address, Vec<String>>, config: CircuitBreakerConfig, + main_contract: H160, } -impl<E: BoundEthInterface> FacetSelectorsChecker<E> { - pub fn new(config: &CircuitBreakerConfig, eth_client: E) -> Self { +impl<E: EthInterface> FacetSelectorsChecker<E> { + pub fn new(config: &CircuitBreakerConfig, eth_client: E, main_contract: H160) -> Self { let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let path_str = "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/facets"; let facets_path = Path::new(&zksync_home).join(path_str); @@ -60,6 +62,10 @@ impl FacetSelectorsChecker { let selectors = contract .functions .into_values() + .filter(|func| { + let func = func.first().cloned().unwrap(); + func.name != "getName" + }) .map(|func| { let func = func.first().cloned().unwrap(); format!("0x{}", hex::encode(func.short_signature())) @@ -74,11 +80,12 @@ impl FacetSelectorsChecker { eth_client, server_selectors, config: config.clone(), + main_contract, } } } -impl<E: BoundEthInterface> FacetSelectorsChecker<E> { +impl<E: EthInterface> FacetSelectorsChecker<E> { async fn get_contract_facet_selectors(&self) -> BTreeMap<Address, Vec<String>> { let facets = self.get_facets_token_with_retry().await.unwrap(); @@ -89,7 +96,15 @@ impl FacetSelectorsChecker { (|| async { let result: Result<Token, EthClientError> = self .eth_client - .call_main_contract_function("facets", (), None, Default::default(), None) + .call_contract_function( + "facets", + (), + None, + Default::default(), + None, + self.main_contract, + zksync_contract(), + ) .await; result @@ -104,7 +119,7 @@ impl FacetSelectorsChecker { } #[async_trait::async_trait] -impl<E: BoundEthInterface> CircuitBreaker for FacetSelectorsChecker<E> { +impl<E: EthInterface> CircuitBreaker for FacetSelectorsChecker<E> { async fn check(&self) -> Result<(), CircuitBreakerError> { let contract_selectors = self.get_contract_facet_selectors().await; if self.server_selectors != contract_selectors { diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs index a988227f11a5..7befca4b4811 100644 --- a/core/lib/circuit_breaker/src/l1_txs.rs +++ b/core/lib/circuit_breaker/src/l1_txs.rs @@ -11,9 +11,11 @@ impl CircuitBreaker for FailedL1TransactionChecker { async fn check(&self) -> Result<(), CircuitBreakerError> { if self .pool - .access_storage_blocking() + .access_storage() + .await .eth_sender_dal() .get_number_of_failed_transactions() + .await > 0 { return Err(CircuitBreakerError::FailedL1Transaction); diff --git a/core/lib/circuit_breaker/src/tests/mod.rs b/core/lib/circuit_breaker/src/tests/mod.rs index 0f793c52d816..978406b6f2a9 100644 --- a/core/lib/circuit_breaker/src/tests/mod.rs +++ b/core/lib/circuit_breaker/src/tests/mod.rs @@ -3,11 +3,12 @@ use std::sync::Mutex; use assert_matches::assert_matches; use async_trait::async_trait; -use zksync_config::configs::chain::CircuitBreakerConfig; +use zksync_config::configs::{chain::CircuitBreakerConfig, ContractsConfig}; use zksync_eth_client::{ types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}, BoundEthInterface, EthInterface, }; +use zksync_types::web3::types::Block; use zksync_types::{ ethabi::Token, web3::{ @@ -193,6 +194,14 @@ impl EthInterface for ETHDirectClientMock { async fn logs(&self, _filter: Filter, _component: &'static str) -> Result<Vec<Log>, Error> { Ok(Default::default()) } + + async fn block( + &self, + _block_id: String, + _component: &'static str, + ) -> Result<Option<Block<H256>>, Error> { + Ok(Default::default()) + } } #[async_trait] @@ -253,8 +262,10 @@ async fn
retries_for_contract_vk() { ))) ); + let contracts = ContractsConfig::from_env(); let config = get_test_circuit_breaker_config(); - let vks_checker = crate::vks::VksChecker::new(&config, eth_client); + let vks_checker = + crate::vks::VksChecker::new(&config, eth_client, contracts.diamond_proxy_addr); assert_matches!(vks_checker.get_vk_token_with_retries().await, Ok(_)); } @@ -282,9 +293,13 @@ async fn retries_for_facet_selectors() { ))) ); + let contracts = ContractsConfig::from_env(); let config = get_test_circuit_breaker_config(); - let facet_selectors_checker = - crate::facet_selectors::FacetSelectorsChecker::new(&config, eth_client); + let facet_selectors_checker = crate::facet_selectors::FacetSelectorsChecker::new( + &config, + eth_client, + contracts.diamond_proxy_addr, + ); assert_matches!( facet_selectors_checker.get_facets_token_with_retry().await, diff --git a/core/lib/circuit_breaker/src/vks.rs b/core/lib/circuit_breaker/src/vks.rs index 87fa1caeee3d..a69f7edf9011 100644 --- a/core/lib/circuit_breaker/src/vks.rs +++ b/core/lib/circuit_breaker/src/vks.rs @@ -8,14 +8,15 @@ use std::{ use thiserror::Error; use zksync_config::configs::chain::CircuitBreakerConfig; -use zksync_eth_client::{types::Error as EthClientError, BoundEthInterface}; +use zksync_contracts::zksync_contract; +use zksync_eth_client::{types::Error as EthClientError, EthInterface}; use zksync_types::{ ethabi::Token, zkevm_test_harness::bellman::{ bn256::{Fq, Fq2, Fr, G1Affine, G2Affine}, CurveAffine, PrimeField, }, - {Address, H256}, + Address, H160, H256, }; use zksync_verification_key_server::get_vk_for_circuit_type; @@ -69,13 +70,15 @@ pub struct VerificationKey { pub struct VksChecker<E> { pub eth_client: E, pub config: CircuitBreakerConfig, + pub main_contract: Address, } -impl<E: BoundEthInterface> VksChecker<E> { - pub fn new(config: &CircuitBreakerConfig, eth_client: E) -> Self { +impl<E: EthInterface> VksChecker<E> { + pub fn new(config: &CircuitBreakerConfig, eth_client: E, main_contract: H160) -> Self { Self { eth_client, config: config.clone(), + main_contract, } } @@ -86,7 +89,15 @@ impl VksChecker { let address_from_contract: Address = (|| async { let result: Result<Token, EthClientError> = self .eth_client - .call_main_contract_function("getVerifier", (), None, Default::default(), None) + .call_contract_function( + "getVerifier", + (), + None, + Default::default(), + None, + self.main_contract, + zksync_contract(), + ) .await; result }) @@ -113,12 +124,14 @@ impl VksChecker { let verifier_params_token: Token = (|| async { let result: Result<Token, EthClientError> = self .eth_client - .call_main_contract_function( + .call_contract_function( "getVerifierParams", (), None, Default::default(), None, + self.main_contract, + zksync_contract(), ) .await; result @@ -223,7 +236,7 @@ impl VksChecker { } #[async_trait::async_trait] -impl<E: BoundEthInterface> CircuitBreaker for VksChecker<E> { +impl<E: EthInterface> CircuitBreaker for VksChecker<E> { async fn check(&self) -> Result<(), CircuitBreakerError> { self.check_verifier_address().await?; self.check_commitments().await?; diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index 24e49557ca14..7770e34d0c60 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/config/src/configs/alerts.rs b/core/lib/config/src/configs/alerts.rs new file mode 100644 index
000000000000..d9f9d191a804 --- /dev/null +++ b/core/lib/config/src/configs/alerts.rs @@ -0,0 +1,43 @@ +// Built-in uses // External uses +use serde::Deserialize; +use super::envy_load; +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct AlertsConfig { + /// List of panic messages from external crypto code that are sporadic + /// and need to be handled separately. + pub sporadic_crypto_errors_substrs: Vec<String>, +} +impl AlertsConfig { + pub fn from_env() -> Self { + envy_load("sporadic_crypto_errors_substrs", "ALERTS_") + } +} +#[cfg(test)] +mod tests { + use super::*; + use crate::configs::test_utils::set_env; + fn expected_config() -> AlertsConfig { + AlertsConfig { + sporadic_crypto_errors_substrs: vec![ + "EventDestroyErr".to_string(), + "Can't free memory of DeviceBuf".to_string(), + "called `Result::unwrap()` on an `Err` value: PoisonError".to_string(), + ], + } + } + #[test] + fn test_from_env() { + let config = r#" + ALERTS_SPORADIC_CRYPTO_ERRORS_SUBSTRS=EventDestroyErr,Can't free memory of DeviceBuf,called `Result::unwrap()` on an `Err` value: PoisonError + "#; + set_env(config); + assert_eq!(AlertsConfig::from_env(), expected_config()); + } +} diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index e4f2962878d9..9c40bb8acfda 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -4,36 +4,36 @@ use serde::Deserialize; use std::net::SocketAddr; use std::time::Duration; // Local uses -pub use crate::configs::utils::Prometheus; -use crate::envy_load; +use super::envy_load; +pub use crate::configs::PrometheusConfig; use zksync_basic_types::H256; /// API configuration. #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ApiConfig { /// Configuration options for the Web3 JSON RPC servers. - pub web3_json_rpc: Web3JsonRpc, + pub web3_json_rpc: Web3JsonRpcConfig, /// Configuration options for the REST servers. - pub explorer: Explorer, + pub explorer: ExplorerApiConfig, /// Configuration options for the Prometheus exporter. - pub prometheus: Prometheus, + pub prometheus: PrometheusConfig, /// Configuration options for the Health check. - pub healthcheck: HealthCheck, + pub healthcheck: HealthCheckConfig, } impl ApiConfig { pub fn from_env() -> Self { Self { - web3_json_rpc: envy_load!("web3_json_rpc", "API_WEB3_JSON_RPC_"), - explorer: envy_load!("explorer", "API_EXPLORER_"), - prometheus: envy_load!("prometheus", "API_PROMETHEUS_"), - healthcheck: envy_load!("healthcheck", "API_HEALTHCHECK_"), + web3_json_rpc: Web3JsonRpcConfig::from_env(), + explorer: ExplorerApiConfig::from_env(), + prometheus: PrometheusConfig::from_env(), + healthcheck: HealthCheckConfig::from_env(), } } } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct Web3JsonRpc { +pub struct Web3JsonRpcConfig { /// Port to which the HTTP RPC server is listening. pub http_port: u16, /// URL to access HTTP RPC server. @@ -69,14 +69,28 @@ pub struct Web3JsonRpc { pub estimate_gas_acceptable_overestimation: u32, /// Max possible size of an ABI encoded tx (in bytes). pub max_tx_size: usize, - /// Main node URL - used only by external node to proxy transactions to. - pub main_node_url: Option<String>, /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the api server panics. /// This is a temporary solution to mitigate API request resulting in thousands of DB queries.
pub vm_execution_cache_misses_limit: Option<usize>, + /// Max number of VM instances to be concurrently spawned by the API server. + /// This option can be tweaked down if the API server is running out of memory. + /// If not set, the VM concurrency limit will be effectively disabled. + pub vm_concurrency_limit: Option<usize>, + /// Smart contract cache size in MB. + pub factory_deps_cache_size_mb: Option<usize>, + /// Override value for the number of threads used for the HTTP RPC server. + /// If not set, the value from `threads_per_server` is used. + pub http_threads: Option<u32>, + /// Override value for the number of threads used for the WebSocket RPC server. + /// If not set, the value from `threads_per_server` is used. + pub ws_threads: Option<u32>, } -impl Web3JsonRpc { +impl Web3JsonRpcConfig { + pub fn from_env() -> Self { + envy_load("web3_json_rpc", "API_WEB3_JSON_RPC_") + } + pub fn http_bind_addr(&self) -> SocketAddr { SocketAddr::new("0.0.0.0".parse().unwrap(), self.http_port) } @@ -108,22 +122,39 @@ impl Web3JsonRpc { pub fn account_pks(&self) -> Vec<H256> { self.account_pks.clone().unwrap_or_default() } + + pub fn factory_deps_cache_size_mb(&self) -> usize { + // 128 MB is the default smart contract code cache size. + self.factory_deps_cache_size_mb.unwrap_or(128) + } + + pub fn http_server_threads(&self) -> usize { + self.http_threads.unwrap_or(self.threads_per_server) as usize + } + + pub fn ws_server_threads(&self) -> usize { + self.ws_threads.unwrap_or(self.threads_per_server) as usize + } } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct HealthCheck { +pub struct HealthCheckConfig { /// Port to which the REST server is listening. pub port: u16, } -impl HealthCheck { +impl HealthCheckConfig { + pub fn from_env() -> Self { + envy_load("healthcheck", "API_HEALTHCHECK_") + } + pub fn bind_addr(&self) -> SocketAddr { SocketAddr::new("0.0.0.0".parse().unwrap(), self.port) } } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct Explorer { +pub struct ExplorerApiConfig { /// Port to which the REST server is listening. pub port: u16, /// URL to access REST server.
@@ -138,7 +169,7 @@ pub struct Explorer { pub threads_per_server: u32, } -impl Explorer { +impl ExplorerApiConfig { pub fn bind_addr(&self) -> SocketAddr { SocketAddr::new("0.0.0.0".parse().unwrap(), self.port) } @@ -154,6 +185,10 @@ impl Explorer { pub fn offset_limit(&self) -> usize { self.offset_limit.unwrap_or(10000) as usize } + + pub fn from_env() -> Self { + envy_load("explorer", "API_EXPLORER_") + } } #[cfg(test)] @@ -165,7 +200,7 @@ mod tests { fn expected_config() -> ApiConfig { ApiConfig { - web3_json_rpc: Web3JsonRpc { + web3_json_rpc: Web3JsonRpcConfig { http_port: 3050, http_url: "http://127.0.0.1:3050".into(), ws_port: 3051, @@ -192,10 +227,13 @@ mod tests { gas_price_scale_factor: 1.2, estimate_gas_acceptable_overestimation: 1000, max_tx_size: 1000000, - main_node_url: None, vm_execution_cache_misses_limit: None, + vm_concurrency_limit: Some(512), + factory_deps_cache_size_mb: Some(128), + http_threads: Some(128), + ws_threads: Some(256), }, - explorer: Explorer { + explorer: ExplorerApiConfig { port: 3070, url: "http://127.0.0.1:3070".into(), network_stats_polling_interval: Some(1000), @@ -203,12 +241,12 @@ mod tests { offset_limit: Some(10000), threads_per_server: 128, }, - prometheus: Prometheus { + prometheus: PrometheusConfig { listener_port: 3312, pushgateway_url: "http://127.0.0.1:9091".into(), push_interval_ms: Some(100), }, - healthcheck: HealthCheck { port: 8081 }, + healthcheck: HealthCheckConfig { port: 8081 }, } } @@ -232,6 +270,10 @@ API_WEB3_JSON_RPC_ACCOUNT_PKS=0x000000000000000000000000000000000000000000000000 API_WEB3_JSON_RPC_ESTIMATE_GAS_SCALE_FACTOR=1.0 API_WEB3_JSON_RPC_ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION=1000 API_WEB3_JSON_RPC_MAX_TX_SIZE=1000000 +API_WEB3_JSON_RPC_VM_CONCURRENCY_LIMIT=512 +API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 +API_WEB3_JSON_RPC_HTTP_THREADS=128 +API_WEB3_JSON_RPC_WS_THREADS=256 API_EXPLORER_PORT="3070" API_EXPLORER_URL="http://127.0.0.1:3070" API_EXPLORER_NETWORK_STATS_POLLING_INTERVAL="1000" diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index c3e942c5d9df..be23b69b6c02 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -7,16 +7,16 @@ use zksync_basic_types::network::Network; use zksync_basic_types::{Address, H256}; use zksync_contracts::BaseSystemContractsHashes; -use crate::envy_load; +use super::envy_load; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ChainConfig { /// L1 parameters configuration. - pub eth: Eth, + pub network: NetworkConfig, /// State keeper / block generating configuration. pub state_keeper: StateKeeperConfig, /// Operations manager / Metadata calculator. 
- pub operations_manager: OperationsManager, + pub operations_manager: OperationsManagerConfig, /// mempool configuration pub mempool: MempoolConfig, /// circuit breaker configuration @@ -26,17 +26,17 @@ pub struct ChainConfig { impl ChainConfig { pub fn from_env() -> Self { Self { - eth: envy_load!("eth", "CHAIN_ETH_"), - state_keeper: envy_load!("state_keeper", "CHAIN_STATE_KEEPER_"), - operations_manager: envy_load!("operations_manager", "CHAIN_OPERATIONS_MANAGER_"), - mempool: envy_load!("mempool", "CHAIN_MEMPOOL_"), - circuit_breaker: envy_load!("circuit_breaker", "CHAIN_CIRCUIT_BREAKER_"), + network: NetworkConfig::from_env(), + state_keeper: StateKeeperConfig::from_env(), + operations_manager: OperationsManagerConfig::from_env(), + mempool: MempoolConfig::from_env(), + circuit_breaker: CircuitBreakerConfig::from_env(), } } } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct Eth { +pub struct NetworkConfig { /// Name of the used Ethereum network, e.g. `localhost` or `rinkeby`. pub network: Network, /// Name of current zkSync network pub zksync_network: String, @@ -47,6 +47,12 @@ pub struct NetworkConfig { pub zksync_network_id: u16, } +impl NetworkConfig { + pub fn from_env() -> Self { + envy_load("network", "CHAIN_ETH_") + } +} + #[derive(Debug, Deserialize, Clone, PartialEq, Default)] pub struct StateKeeperConfig { /// The max number of slots for txs in a block before it should be sealed by the slots sealer. @@ -56,6 +62,10 @@ pub struct StateKeeperConfig { pub block_commit_deadline_ms: u64, /// Number of ms after which a miniblock should be sealed by the timeout sealer. pub miniblock_commit_deadline_ms: u64, + /// Capacity of the queue for asynchronous miniblock sealing. Once this many miniblocks are queued, + /// sealing will block until some of the miniblocks from the queue are processed. + /// 0 means that sealing is synchronous; this is mostly useful for performance comparison, testing, etc. + pub miniblock_seal_queue_capacity: usize, /// The max number of gas to spend on an L1 tx before its batch should be sealed by the gas sealer. pub max_single_tx_gas: u32, @@ -71,11 +81,11 @@ pub struct StateKeeperConfig { /// Configuration option for tx to be rejected in case /// it takes more percentage of the block capacity than this value. pub reject_tx_at_gas_percentage: f64, - /// Denotes the percentage of geometry params used in l2 block, that triggers l2 block seal. + /// Denotes the percentage of geometry params used in L2 block that triggers L2 block seal. pub close_block_at_geometry_percentage: f64, - /// Denotes the percentage of l1 params used in l2 block, that triggers l2 block seal. + /// Denotes the percentage of L1 params used in L2 block that triggers L2 block seal. pub close_block_at_eth_params_percentage: f64, - /// Denotes the percentage of l1 gas used in l2 block, that triggers l2 block seal. + /// Denotes the percentage of L1 gas used in L2 block that triggers L2 block seal. pub close_block_at_gas_percentage: f64, pub fee_account_addr: Address, @@ -89,13 +99,11 @@ pub struct StateKeeperConfig { /// Max number of computational gas that validation step is allowed to take. pub validation_computational_gas_limit: u32, pub save_call_traces: bool, - /// Max number of l1 gas price that is allowed to be used in state keeper.
- pub max_l1_gas_price: Option<u64>, } impl StateKeeperConfig { - pub fn max_l1_gas_price(&self) -> u64 { - self.max_l1_gas_price.unwrap_or(u64::MAX) + pub fn from_env() -> Self { + envy_load("state_keeper", "CHAIN_STATE_KEEPER_") } pub fn base_system_contracts_hashes(&self) -> BaseSystemContractsHashes { @@ -107,12 +115,16 @@ impl StateKeeperConfig { } #[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct OperationsManager { +pub struct OperationsManagerConfig { /// Sleep time in ms when there is no new input data pub delay_interval: u64, } -impl OperationsManager { +impl OperationsManagerConfig { + pub fn from_env() -> Self { + envy_load("operations_manager", "CHAIN_OPERATIONS_MANAGER_") + } + pub fn delay_interval(&self) -> Duration { Duration::from_millis(self.delay_interval) } @@ -126,6 +138,10 @@ pub struct CircuitBreakerConfig { } impl CircuitBreakerConfig { + pub fn from_env() -> Self { + envy_load("circuit_breaker", "CHAIN_CIRCUIT_BREAKER_") + } + pub fn sync_interval(&self) -> Duration { Duration::from_millis(self.sync_interval_ms) } @@ -142,6 +158,7 @@ pub struct MempoolConfig { pub capacity: u64, pub stuck_tx_timeout: u64, pub remove_stuck_txs: bool, + pub delay_interval: u64, } impl MempoolConfig { @@ -152,6 +169,14 @@ impl MempoolConfig { pub fn stuck_tx_timeout(&self) -> Duration { Duration::from_secs(self.stuck_tx_timeout) } + + pub fn delay_interval(&self) -> Duration { + Duration::from_millis(self.delay_interval) + } + + pub fn from_env() -> Self { + envy_load("mempool", "CHAIN_MEMPOOL_") + } } #[cfg(test)] @@ -161,7 +186,7 @@ mod tests { fn expected_config() -> ChainConfig { ChainConfig { - eth: Eth { + network: NetworkConfig { network: "localhost".parse().unwrap(), zksync_network: "localhost".to_string(), zksync_network_id: 270, @@ -170,6 +195,7 @@ mod tests { transaction_slots: 50, block_commit_deadline_ms: 2500, miniblock_commit_deadline_ms: 1000, + miniblock_seal_queue_capacity: 10, max_single_tx_gas: 1_000_000, max_allowed_l2_tx_gas_limit: 2_000_000_000, close_block_at_eth_params_percentage: 0.2, @@ -184,9 +210,8 @@ mod tests { default_aa_hash: H256::from(&[254; 32]), validation_computational_gas_limit: 10_000_000, save_call_traces: false, - max_l1_gas_price: Some(100000000), }, - operations_manager: OperationsManager { + operations_manager: OperationsManagerConfig { delay_interval: 100, }, mempool: MempoolConfig { @@ -195,6 +220,7 @@ mod tests { capacity: 1_000_000, stuck_tx_timeout: 10, remove_stuck_txs: true, + delay_interval: 100, }, circuit_breaker: CircuitBreakerConfig { sync_interval_ms: 1000, @@ -222,17 +248,18 @@ CHAIN_STATE_KEEPER_REJECT_TX_AT_ETH_PARAMS_PERCENTAGE="0.8" CHAIN_STATE_KEEPER_REJECT_TX_AT_GAS_PERCENTAGE="0.5" CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE_MS="2500" CHAIN_STATE_KEEPER_MINIBLOCK_COMMIT_DEADLINE_MS="1000" +CHAIN_STATE_KEEPER_MINIBLOCK_SEAL_QUEUE_CAPACITY="10" CHAIN_STATE_KEEPER_FAIR_L2_GAS_PRICE="250000000" CHAIN_STATE_KEEPER_BOOTLOADER_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" CHAIN_STATE_KEEPER_DEFAULT_AA_HASH="0xfefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" CHAIN_STATE_KEEPER_VALIDATION_COMPUTATIONAL_GAS_LIMIT="10000000" CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false" -CHAIN_STATE_KEEPER_MAX_L1_GAS_PRICE="100000000" CHAIN_OPERATIONS_MANAGER_DELAY_INTERVAL="100" CHAIN_MEMPOOL_SYNC_INTERVAL_MS="10" CHAIN_MEMPOOL_SYNC_BATCH_SIZE="1000" CHAIN_MEMPOOL_STUCK_TX_TIMEOUT="10" CHAIN_MEMPOOL_REMOVE_STUCK_TXS="true" +CHAIN_MEMPOOL_DELAY_INTERVAL="100" CHAIN_MEMPOOL_CAPACITY="1000000"
CHAIN_CIRCUIT_BREAKER_SYNC_INTERVAL_MS="1000" CHAIN_CIRCUIT_BREAKER_HTTP_REQ_MAX_RETRY_NUMBER="5" diff --git a/core/lib/config/src/configs/circuit_synthesizer.rs b/core/lib/config/src/configs/circuit_synthesizer.rs index 7f1a8e7012e3..d0959304ebcf 100644 --- a/core/lib/config/src/configs/circuit_synthesizer.rs +++ b/core/lib/config/src/configs/circuit_synthesizer.rs @@ -2,7 +2,7 @@ use std::time::Duration; use serde::Deserialize; -use crate::envy_load; +use super::envy_load; /// Configuration for the witness generation #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -27,7 +27,7 @@ pub struct CircuitSynthesizerConfig { impl CircuitSynthesizerConfig { pub fn from_env() -> Self { - envy_load!("circuit_synthesizer", "CIRCUIT_SYNTHESIZER_") + envy_load("circuit_synthesizer", "CIRCUIT_SYNTHESIZER_") } pub fn generation_timeout(&self) -> Duration { diff --git a/core/lib/config/src/configs/contract_verifier.rs b/core/lib/config/src/configs/contract_verifier.rs index 26323e7bb97b..4c42ae45cc46 100644 --- a/core/lib/config/src/configs/contract_verifier.rs +++ b/core/lib/config/src/configs/contract_verifier.rs @@ -3,7 +3,7 @@ use std::time::Duration; // External uses use serde::Deserialize; // Local uses -use crate::envy_load; +use super::envy_load; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ContractVerifierConfig { @@ -17,7 +17,7 @@ pub struct ContractVerifierConfig { impl ContractVerifierConfig { pub fn from_env() -> Self { - envy_load!("contract_verifier", "CONTRACT_VERIFIER_") + envy_load("contract_verifier", "CONTRACT_VERIFIER_") } pub fn compilation_timeout(&self) -> Duration { diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index 09d2ac3c814d..5775953a65d4 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -3,7 +3,7 @@ use serde::Deserialize; // Workspace uses use zksync_basic_types::{Address, H256}; // Local uses -use crate::envy_load; +use super::envy_load; /// Data about deployed contracts. #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -22,13 +22,15 @@ pub struct ContractsConfig { pub l1_erc20_bridge_proxy_addr: Address, pub l1_erc20_bridge_impl_addr: Address, pub l2_erc20_bridge_addr: Address, + pub l1_weth_bridge_proxy_addr: Option
<Address>, + pub l2_weth_bridge_addr: Option<Address>, pub l1_allow_list_addr: Address, pub l2_testnet_paymaster_addr: Option<Address>
, } impl ContractsConfig { pub fn from_env() -> Self { - envy_load!("contracts", "CONTRACTS_") + envy_load("contracts", "CONTRACTS_") } } @@ -56,6 +58,8 @@ mod tests { l1_erc20_bridge_impl_addr: addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888"), l2_erc20_bridge_addr: addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888"), l1_allow_list_addr: addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888"), + l1_weth_bridge_proxy_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), + l2_weth_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_testnet_paymaster_addr: Some(addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF")), } } @@ -78,6 +82,8 @@ CONTRACTS_L1_ERC20_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888 CONTRACTS_L1_ALLOW_LIST_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_ERC20_BRIDGE_IMPL_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" +CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" +CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" "#; set_env(config); diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index 1f6e0563294a..67be709d3b3e 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -5,43 +5,28 @@ use std::{env, str::FromStr, time::Duration}; /// Database configuration. #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DBConfig { - /// Path to the database data directory. - pub path: String, /// Path to the database data directory that serves state cache. pub state_keeper_db_path: String, /// Path to merkle tree backup directory. pub merkle_tree_backup_path: String, - /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*old* implementation) - /// if the lightweight syncing is enabled. - pub merkle_tree_fast_ssd_path: String, - /// Fast SSD path. Used as a RocksDB dir for the Merkle tree (*new* implementation). - // We cannot compute this path like - // - // ``` - // new_merkle_tree_ssd_path = merkle_tree_fast_ssd_path.join("new") - // ``` - // - // because (1) we need to maintain backward compatibility; (2) it looks dangerous - // to place a RocksDB instance in a subdirectory of another RocksDB instance. + /// Fast SSD path. Used as a RocksDB dir for the Merkle tree. pub new_merkle_tree_ssd_path: String, - /// Throttle interval for the new tree implementation in milliseconds. This interval will be - /// applied after each time the tree makes progress. + /// Throttle interval for the Merkle tree in milliseconds. This interval will be applied after + /// each time the tree makes progress. pub new_merkle_tree_throttle_ms: u64, /// Number of backups to keep. pub backup_count: usize, /// Time interval between performing backups. pub backup_interval_ms: u64, - /// Maximum number of blocks to be processed by the full tree at a time. + /// Maximum number of blocks to be processed by the Merkle tree at a time. 
pub max_block_batch: usize, } impl Default for DBConfig { fn default() -> Self { Self { - path: "./db".to_owned(), state_keeper_db_path: "./db/state_keeper".to_owned(), merkle_tree_backup_path: "./db/backups".to_owned(), - merkle_tree_fast_ssd_path: "./db/lightweight".to_owned(), new_merkle_tree_ssd_path: "./db/lightweight-new".to_owned(), new_merkle_tree_throttle_ms: 0, backup_count: 5, @@ -54,18 +39,12 @@ impl Default for DBConfig { impl DBConfig { pub fn from_env() -> Self { let mut config = DBConfig::default(); - if let Ok(path) = env::var("DATABASE_PATH") { - config.path = path; - } if let Ok(path) = env::var("DATABASE_STATE_KEEPER_DB_PATH") { config.state_keeper_db_path = path; } if let Ok(path) = env::var("DATABASE_MERKLE_TREE_BACKUP_PATH") { config.merkle_tree_backup_path = path; } - if let Ok(path) = env::var("DATABASE_MERKLE_TREE_FAST_SSD_PATH") { - config.merkle_tree_fast_ssd_path = path; - } if let Ok(path) = env::var("DATABASE_NEW_MERKLE_TREE_SSD_PATH") { config.new_merkle_tree_ssd_path = path; } @@ -89,11 +68,6 @@ impl DBConfig { env_var.parse().ok() } - /// Path to the database data directory. - pub fn path(&self) -> &str { - &self.path - } - /// Path to the database data directory that serves state cache. pub fn state_keeper_db_path(&self) -> &str { &self.state_keeper_db_path @@ -104,11 +78,7 @@ impl DBConfig { &self.merkle_tree_backup_path } - pub fn merkle_tree_fast_ssd_path(&self) -> &str { - &self.merkle_tree_fast_ssd_path - } - - /// Throttle interval for the new Merkle tree implementation. + /// Throttle interval for the Merkle tree. pub fn new_merkle_tree_throttle_interval(&self) -> Duration { Duration::from_millis(self.new_merkle_tree_throttle_ms) } @@ -135,10 +105,8 @@ mod tests { #[test] fn from_env() { let config = r#" -DATABASE_PATH="./db" DATABASE_STATE_KEEPER_DB_PATH="./db/state_keeper" DATABASE_MERKLE_TREE_BACKUP_PATH="./db/backups" -DATABASE_MERKLE_TREE_FAST_SSD_PATH="./db/lightweight" DATABASE_NEW_MERKLE_TREE_SSD_PATH="./db/lightweight-new" DATABASE_NEW_MERKLE_TREE_THROTTLE_MS=0 DATABASE_BACKUP_COUNT=5 @@ -154,19 +122,17 @@ DATABASE_MAX_BLOCK_BATCH=100 /// Checks the correctness of the config helper methods. #[test] fn methods() { - let config = DBConfig::default(); + let db_config = DBConfig::default(); - assert_eq!(config.path(), &config.path); - assert_eq!(config.state_keeper_db_path(), &config.state_keeper_db_path); assert_eq!( - config.merkle_tree_backup_path(), - &config.merkle_tree_backup_path + db_config.state_keeper_db_path(), + &db_config.state_keeper_db_path ); assert_eq!( - config.merkle_tree_fast_ssd_path(), - &config.merkle_tree_fast_ssd_path + db_config.merkle_tree_backup_path(), + &db_config.merkle_tree_backup_path ); - assert_eq!(config.backup_count(), config.backup_count); - assert_eq!(config.backup_interval().as_secs(), 60); + assert_eq!(db_config.backup_count(), db_config.backup_count); + assert_eq!(db_config.backup_interval().as_secs(), 60); } } diff --git a/core/lib/config/src/configs/eth_client.rs b/core/lib/config/src/configs/eth_client.rs index 00fc08bf8de5..6e293eac26b5 100644 --- a/core/lib/config/src/configs/eth_client.rs +++ b/core/lib/config/src/configs/eth_client.rs @@ -1,7 +1,7 @@ // External uses use serde::Deserialize; // Local uses -use crate::envy_load; +use super::envy_load; /// Configuration for the Ethereum gateways. 
#[derive(Debug, Deserialize, Clone, PartialEq)]
@@ -14,7 +14,7 @@ pub struct ETHClientConfig {
impl ETHClientConfig {
    pub fn from_env() -> Self {
-        let config: Self = envy_load!("eth_client", "ETH_CLIENT_");
+        let config: Self = envy_load("eth_client", "ETH_CLIENT_");
        if config.web3_url.find(',').is_some() {
            panic!(
                "Multiple web3 URLs aren't supported anymore. Provided invalid value: {}",
diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs
index f10462dd2dbf..0cb316fb4621 100644
--- a/core/lib/config/src/configs/eth_sender.rs
+++ b/core/lib/config/src/configs/eth_sender.rs
@@ -3,9 +3,9 @@ use std::time::Duration;
// External uses
use serde::Deserialize;
// Workspace uses
-use zksync_basic_types::{Address, H256};
+use zksync_basic_types::H256;
// Local uses
-use crate::envy_load;
+use super::envy_load;

/// Configuration for the Ethereum sender crate.
#[derive(Debug, Deserialize, Clone, PartialEq)]
@@ -19,8 +19,8 @@ pub struct ETHSenderConfig {
impl ETHSenderConfig {
    pub fn from_env() -> Self {
        Self {
-            sender: envy_load!("eth_sender", "ETH_SENDER_SENDER_"),
-            gas_adjuster: envy_load!("eth_sender.gas_adjuster", "ETH_SENDER_GAS_ADJUSTER_"),
+            sender: SenderConfig::from_env(),
+            gas_adjuster: GasAdjusterConfig::from_env(),
        }
    }
}
@@ -35,12 +35,9 @@ pub enum ProofSendingMode {
#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct SenderConfig {
    pub aggregated_proof_sizes: Vec<usize>,
-    /// Private key of the operator account.
-    pub operator_private_key: H256,
-    /// Address of the operator account.
-    pub operator_commit_eth_addr: Address,
-    /// mount of confirmations required to consider L1 transaction committed.
-    pub wait_confirmations: u64,
+    /// Amount of confirmations required to consider an L1 transaction committed.
+    /// If not specified, the L1 transaction will be considered finalized once its block is finalized.
+    pub wait_confirmations: Option<u64>,
    /// Node polling period in seconds.
    pub tx_poll_period: u64,
    /// Aggregate txs polling period in seconds.
@@ -76,6 +73,17 @@ impl SenderConfig {
    pub fn aggregate_tx_poll_period(&self) -> Duration {
        Duration::from_secs(self.aggregate_tx_poll_period)
    }
+
+    // Don't load the private key if it's not required.
+    pub fn private_key(&self) -> Option<H256> {
+        std::env::var("ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY")
+            .ok()
+            .map(|pk| pk.parse().unwrap())
+    }
+
+    pub fn from_env() -> Self {
+        envy_load("eth_sender", "ETH_SENDER_SENDER_")
+    }
}
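// A minimal usage sketch (editorial, not part of the diff): the operator key is now
// read lazily via `private_key()` instead of being a mandatory `SenderConfig` field,
// so components that never sign L1 transactions can load the config without
// ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY being set:
//
//     let sender = SenderConfig::from_env();
//     if let Some(operator_key) = sender.private_key() {
//         // hand the H256 key to the signing client
//     }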
#[derive(Debug, Deserialize, Copy, Clone, PartialEq)]
@@ -94,6 +102,8 @@ pub struct GasAdjusterConfig {
    pub internal_enforced_l1_gas_price: Option<u64>,
    /// Node polling period in seconds
    pub poll_period: u64,
+    /// Maximum L1 gas price that the state keeper is allowed to use.
+    pub max_l1_gas_price: Option<u64>,
}

impl GasAdjusterConfig {
@@ -101,12 +111,20 @@
    pub fn poll_period(&self) -> Duration {
        Duration::from_secs(self.poll_period)
    }
+
+    pub fn max_l1_gas_price(&self) -> u64 {
+        self.max_l1_gas_price.unwrap_or(u64::MAX)
+    }
+
+    pub fn from_env() -> Self {
+        envy_load("eth_sender.gas_adjuster", "ETH_SENDER_GAS_ADJUSTER_")
+    }
}

#[cfg(test)]
mod tests {
    use super::*;
-    use crate::configs::test_utils::{addr, hash, set_env};
+    use crate::configs::test_utils::{hash, set_env};

    fn expected_config() -> ETHSenderConfig {
        ETHSenderConfig {
@@ -121,14 +139,10 @@ mod tests {
            timestamp_criteria_max_allowed_lag: 30,
            max_aggregated_blocks_to_commit: 3,
            max_aggregated_blocks_to_execute: 4,
-            wait_confirmations: 1,
+            wait_confirmations: Some(1),
            tx_poll_period: 3,
            aggregate_tx_poll_period: 3,
            max_txs_in_flight: 3,
-            operator_private_key: hash(
-                "27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be",
-            ),
-            operator_commit_eth_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"),
            proof_sending_mode: ProofSendingMode::SkipEveryProof,
            l1_batch_min_age_before_execute_seconds: Some(1000),
            max_acceptable_priority_fee_in_gwei: 100_000_000_000,
@@ -141,6 +155,7 @@
            internal_l1_pricing_multiplier: 0.8,
            internal_enforced_l1_gas_price: None,
            poll_period: 15,
+            max_l1_gas_price: Some(100000000),
        },
    }
}
@@ -153,7 +168,6 @@
ETH_SENDER_SENDER_TX_POLL_PERIOD="3"
ETH_SENDER_SENDER_AGGREGATE_TX_POLL_PERIOD="3"
ETH_SENDER_SENDER_MAX_TXS_IN_FLIGHT="3"
ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be"
-ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7"
ETH_SENDER_SENDER_PROOF_SENDING_MODE="SkipEveryProof"
ETH_SENDER_GAS_ADJUSTER_DEFAULT_PRIORITY_FEE_PER_GAS="20000000000"
ETH_SENDER_GAS_ADJUSTER_MAX_BASE_FEE_SAMPLES="10000"
@@ -161,6 +175,7 @@ ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_A="1.5"
ETH_SENDER_GAS_ADJUSTER_PRICING_FORMULA_PARAMETER_B="1.0005"
ETH_SENDER_GAS_ADJUSTER_INTERNAL_L1_PRICING_MULTIPLIER="0.8"
ETH_SENDER_GAS_ADJUSTER_POLL_PERIOD="15"
+ETH_SENDER_GAS_ADJUSTER_MAX_L1_GAS_PRICE="100000000"
ETH_SENDER_WAIT_FOR_PROOFS="false"
ETH_SENDER_SENDER_AGGREGATED_PROOF_SIZES="1,5"
ETH_SENDER_SENDER_MAX_AGGREGATED_BLOCKS_TO_COMMIT="3"
@@ -178,6 +193,10 @@ ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000"
        let actual = ETHSenderConfig::from_env();
        assert_eq!(actual, expected_config());
+        assert_eq!(
+            actual.sender.private_key().unwrap(),
+            hash("27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be")
+        );
    }

    /// Checks the correctness of the config helper methods.
diff --git a/core/lib/config/src/configs/eth_watch.rs b/core/lib/config/src/configs/eth_watch.rs
index f1b6e003651a..0ae24f672d0c 100644
--- a/core/lib/config/src/configs/eth_watch.rs
+++ b/core/lib/config/src/configs/eth_watch.rs
@@ -3,14 +3,14 @@ use std::time::Duration;
// External uses
use serde::Deserialize;
// Local uses
-use crate::envy_load;
+use super::envy_load;

/// Configuration for the Ethereum sender crate.
#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct ETHWatchConfig {
    /// Amount of confirmations for the priority operation to be processed.
-    /// In production this should be a non-zero value because of block reverts.
-    pub confirmations_for_eth_event: u64,
+    /// If not specified, the operation will be processed once its block is finalized.
+    pub confirmations_for_eth_event: Option<u64>,
    /// How often we want to poll the Ethereum node.
    /// Value in milliseconds.
    pub eth_node_poll_interval: u64,
@@ -18,7 +18,7 @@
impl ETHWatchConfig {
    pub fn from_env() -> Self {
-        envy_load!("eth_watch", "ETH_WATCH_")
+        envy_load("eth_watch", "ETH_WATCH_")
    }

    /// Converts `self.eth_node_poll_interval` into `Duration`.
@@ -34,7 +34,7 @@ mod tests {
    fn expected_config() -> ETHWatchConfig {
        ETHWatchConfig {
-            confirmations_for_eth_event: 0,
+            confirmations_for_eth_event: Some(0),
            eth_node_poll_interval: 300,
        }
    }
diff --git a/core/lib/config/src/configs/fetcher.rs b/core/lib/config/src/configs/fetcher.rs
index b83e5794651e..98d6c859058b 100644
--- a/core/lib/config/src/configs/fetcher.rs
+++ b/core/lib/config/src/configs/fetcher.rs
@@ -5,7 +5,7 @@ use std::time::Duration;
use serde::Deserialize;
// Workspace uses
// Local uses
-use crate::envy_load;
+use super::envy_load;

#[derive(Debug, Deserialize, Clone, Copy, PartialEq)]
pub enum TokenListSource {
@@ -53,11 +53,11 @@ pub struct FetcherConfig {
impl FetcherConfig {
    pub fn from_env() -> Self {
        Self {
-            token_list: envy_load!("token_list", "FETCHER_TOKEN_LIST_"),
-            token_price: envy_load!("token_price", "FETCHER_TOKEN_PRICE_"),
-            token_trading_volume: envy_load!(
+            token_list: envy_load("token_list", "FETCHER_TOKEN_LIST_"),
+            token_price: envy_load("token_price", "FETCHER_TOKEN_PRICE_"),
+            token_trading_volume: envy_load(
                "token_trading_volume",
-                "FETCHER_TOKEN_TRADING_VOLUME_"
+                "FETCHER_TOKEN_TRADING_VOLUME_",
            ),
        }
    }
diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs
new file mode 100644
index 000000000000..057c1572516d
--- /dev/null
+++ b/core/lib/config/src/configs/fri_prover.rs
@@ -0,0 +1,69 @@
+use super::envy_load;
+use serde::Deserialize;
+use std::time::Duration;
+
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub enum SetupLoadMode {
+    FromDisk,
+    FromMemory,
+}
+
+/// Configuration for the fri prover application
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct FriProverConfig {
+    pub setup_data_path: String,
+    pub prometheus_port: u16,
+    pub max_attempts: u32,
+    pub generation_timeout_in_secs: u16,
+    pub base_layer_circuit_ids_to_be_verified: Vec<u8>,
+    pub recursive_layer_circuit_ids_to_be_verified: Vec<u8>,
+    pub setup_load_mode: SetupLoadMode,
+    pub specialized_group_id: u8,
+}
+
+impl FriProverConfig {
+    pub fn from_env() -> Self {
+        envy_load("fri_prover", "FRI_PROVER_")
+    }
+
+    pub fn proof_generation_timeout(&self) -> Duration {
+        Duration::from_secs(self.generation_timeout_in_secs as u64)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::configs::test_utils::set_env;
+
+    use super::*;
+
+    fn expected_config() -> FriProverConfig {
+        FriProverConfig {
+            setup_data_path: "/usr/src/setup-data".to_string(),
+            prometheus_port: 3315,
+            max_attempts: 10,
+            generation_timeout_in_secs: 300,
+            base_layer_circuit_ids_to_be_verified: vec![1, 5],
+            recursive_layer_circuit_ids_to_be_verified: vec![1, 2, 3],
+            setup_load_mode: SetupLoadMode::FromDisk,
+            specialized_group_id: 10,
+        }
+    }
+
+    #[test]
+    fn from_env() {
+        let config = r#"
+FRI_PROVER_SETUP_DATA_PATH="/usr/src/setup-data"
+FRI_PROVER_PROMETHEUS_PORT="3315"
+FRI_PROVER_MAX_ATTEMPTS="10"
+FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300"
+FRI_PROVER_BASE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,5"
+FRI_PROVER_RECURSIVE_LAYER_CIRCUIT_IDS_TO_BE_VERIFIED="1,2,3"
+FRI_PROVER_SETUP_LOAD_MODE="FromDisk"
+FRI_PROVER_SPECIALIZED_GROUP_ID="10"
+        "#;
+        set_env(config);
+        let actual = FriProverConfig::from_env();
+        assert_eq!(actual, expected_config());
+    }
+}
diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs
new file mode 100644
index 000000000000..57b89489ce0e
--- /dev/null
+++ b/core/lib/config/src/configs/fri_prover_group.rs
@@ -0,0 +1,438 @@
+use std::collections::{HashMap, HashSet};
+use std::env;
+
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Deserialize, Serialize, Clone, Eq, Hash, PartialEq)]
+pub struct CircuitIdRoundTuple {
+    pub circuit_id: u8,
+    pub aggregation_round: u8,
+}
+
+impl CircuitIdRoundTuple {
+    pub fn new(circuit_id: u8, aggregation_round: u8) -> Self {
+        Self {
+            circuit_id,
+            aggregation_round,
+        }
+    }
+}
+
+fn load_from_env_variable() -> HashMap<String, HashSet<CircuitIdRoundTuple>> {
+    // Prepare a hash map to store the mapping of group to a vector of tuples
+    let mut groups: HashMap<String, HashSet<CircuitIdRoundTuple>> = (0..=12)
+        .map(|i| (format!("group_{}", i), HashSet::new()))
+        .collect();
+
+    // Separate environment variables into Circuit Id and Aggregation Round
+    let mut circuit_ids = HashMap::new();
+    let mut aggregation_rounds = HashMap::new();
+    for (key, value) in env::vars() {
+        if key.contains("_CIRCUIT_ID") {
+            circuit_ids.insert(key, value);
+        } else if key.contains("_AGGREGATION_ROUND") {
+            aggregation_rounds.insert(key, value);
+        }
+    }
+
+    // Iterate over all circuit id variables
+    for (key, value_str) in circuit_ids {
+        let key_parts: Vec<&str> = key.split('_').collect();
+        if let (Some(group_key), Some(value), Some(index_str)) = (
+            key_parts.get(4),
+            value_str.parse::<u8>().ok(),
+            key_parts.get(5),
+        ) {
+            let round_key = format!(
+                "FRI_PROVER_GROUP_GROUP_{}_{}_AGGREGATION_ROUND",
+                group_key, index_str
+            );
+            if let Some(round_str) = aggregation_rounds.get(&round_key) {
+                if let Ok(round) = round_str.parse::<u8>() {
+                    let tuple = CircuitIdRoundTuple::new(value, round);
+                    if let Some(group) = groups.get_mut(&format!("group_{}", group_key)) {
+                        group.insert(tuple);
+                    }
+                }
+            }
+        }
+    }
+    groups
+}
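// Shape of the expected variables (editorial sketch, inferred from the parser above
// and the test at the bottom of this file): each group entry is described by a pair
//
//     FRI_PROVER_GROUP_GROUP_2_1_CIRCUIT_ID="4"
//     FRI_PROVER_GROUP_GROUP_2_1_AGGREGATION_ROUND="0"
//
// which load_from_env_variable() collects as CircuitIdRoundTuple::new(4, 0) in "group_2".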
+/// Configuration for the grouping of specialized provers.
+#[derive(Debug, Deserialize, Clone, PartialEq)]
+pub struct FriProverGroupConfig {
+    pub group_0: HashSet<CircuitIdRoundTuple>,
+    pub group_1: HashSet<CircuitIdRoundTuple>,
+    pub group_2: HashSet<CircuitIdRoundTuple>,
+    pub group_3: HashSet<CircuitIdRoundTuple>,
+    pub group_4: HashSet<CircuitIdRoundTuple>,
+    pub group_5: HashSet<CircuitIdRoundTuple>,
+    pub group_6: HashSet<CircuitIdRoundTuple>,
+    pub group_7: HashSet<CircuitIdRoundTuple>,
+    pub group_8: HashSet<CircuitIdRoundTuple>,
+    pub group_9: HashSet<CircuitIdRoundTuple>,
+    pub group_10: HashSet<CircuitIdRoundTuple>,
+    pub group_11: HashSet<CircuitIdRoundTuple>,
+    pub group_12: HashSet<CircuitIdRoundTuple>,
+}
+
+impl FriProverGroupConfig {
+    pub fn from_env() -> Self {
+        let mut groups = load_from_env_variable();
+        let config = FriProverGroupConfig {
+            group_0: groups.remove("group_0").unwrap_or_default(),
+            group_1: groups.remove("group_1").unwrap_or_default(),
+            group_2: groups.remove("group_2").unwrap_or_default(),
+            group_3: groups.remove("group_3").unwrap_or_default(),
+            group_4: groups.remove("group_4").unwrap_or_default(),
+            group_5: groups.remove("group_5").unwrap_or_default(),
+            group_6: groups.remove("group_6").unwrap_or_default(),
+            group_7: groups.remove("group_7").unwrap_or_default(),
+            group_8: groups.remove("group_8").unwrap_or_default(),
+            group_9: groups.remove("group_9").unwrap_or_default(),
+            group_10: groups.remove("group_10").unwrap_or_default(),
+            group_11: groups.remove("group_11").unwrap_or_default(),
+            group_12: groups.remove("group_12").unwrap_or_default(),
+        };
+        config.validate();
+        config
+    }
+
+    pub fn get_circuit_ids_for_group_id(&self, group_id: u8) -> Option<Vec<CircuitIdRoundTuple>> {
+        match group_id {
+            0 => Some(self.group_0.clone().into_iter().collect()),
+            1 => Some(self.group_1.clone().into_iter().collect()),
+            2 => Some(self.group_2.clone().into_iter().collect()),
+            3 => Some(self.group_3.clone().into_iter().collect()),
+            4 => Some(self.group_4.clone().into_iter().collect()),
+            5 => Some(self.group_5.clone().into_iter().collect()),
+            6 => Some(self.group_6.clone().into_iter().collect()),
+            7 => Some(self.group_7.clone().into_iter().collect()),
+            8 => Some(self.group_8.clone().into_iter().collect()),
+            9 => Some(self.group_9.clone().into_iter().collect()),
+            10 => Some(self.group_10.clone().into_iter().collect()),
+            11 => Some(self.group_11.clone().into_iter().collect()),
+            12 => Some(self.group_12.clone().into_iter().collect()),
+            _ => None,
+        }
+    }
+
+    /// Checks that all circuit ids are present exactly once, and that for each
+    /// aggregation round the circuit ids are in the correct range:
+    /// in round 0 the circuit ids should be 1 to 13, in round 1 they should be 3 to 15,
+    /// in round 2 the only circuit id should be 2, and in round 3 it should be 1.
+    fn validate(&self) {
+        let mut rounds: Vec<Vec<CircuitIdRoundTuple>> = vec![Vec::new(); 4];
+        let groups = [
+            &self.group_0, &self.group_1, &self.group_2, &self.group_3, &self.group_4,
+            &self.group_5, &self.group_6, &self.group_7, &self.group_8, &self.group_9,
+            &self.group_10, &self.group_11, &self.group_12,
+        ];
+        for group in groups {
+            for circuit_round in group {
+                rounds[circuit_round.aggregation_round as usize].push(circuit_round.clone());
+            }
+        }
+
+        for (round, round_data) in rounds.iter().enumerate() {
+            let circuit_ids: Vec<u8> = round_data.iter().map(|x| x.circuit_id).collect();
+            let unique_circuit_ids: HashSet<u8> = circuit_ids.iter().copied().collect();
+            let duplicates: HashSet<u8> = circuit_ids
+                .iter()
+                .filter(|id| circuit_ids.iter().filter(|x| x == id).count() > 1)
+                .copied()
+                .collect();
+
+            match round {
+                0 => {
+                    let expected_range: Vec<_> = (1..=13).collect();
+                    let missing_ids: Vec<_> = expected_range.iter().filter(|id| !circuit_ids.contains(id)).collect();
+                    assert!(missing_ids.is_empty(), "Circuit IDs for round {} are missing: {:?}", round, missing_ids);
+                    assert_eq!(circuit_ids.len(), unique_circuit_ids.len(), "Circuit IDs: {:?} should be unique for round {}.", duplicates, round);
+                    let not_in_range: Vec<_> = circuit_ids.iter().filter(|&id| !expected_range.contains(id)).collect();
+                    assert!(not_in_range.is_empty(), "Aggregation round 0 should only contain circuit IDs 1 to 13. Ids out of range: {:?}", not_in_range);
+                }
+                1 => {
+                    let expected_range: Vec<_> = (3..=15).collect();
+                    let missing_ids: Vec<_> = expected_range.iter().filter(|id| !circuit_ids.contains(id)).collect();
+                    assert!(missing_ids.is_empty(), "Circuit IDs for round {} are missing: {:?}", round, missing_ids);
+                    assert_eq!(circuit_ids.len(), unique_circuit_ids.len(), "Circuit IDs: {:?} should be unique for round {}.", duplicates, round);
+                    let not_in_range: Vec<_> = circuit_ids.iter().filter(|&id| !expected_range.contains(id)).collect();
+                    assert!(not_in_range.is_empty(), "Aggregation round 1 should only contain circuit IDs 3 to 15. Ids out of range: {:?}", not_in_range);
+                }
+                2 => {
+                    let expected_range = vec![2];
+                    let missing_ids: Vec<_> = expected_range.iter().filter(|id| !circuit_ids.contains(id)).collect();
+                    assert!(missing_ids.is_empty(), "Circuit IDs for round {} are missing: {:?}", round, missing_ids);
+                    assert_eq!(circuit_ids.len(), unique_circuit_ids.len(), "Circuit IDs: {:?} should be unique for round {}.", duplicates, round);
+                    let not_in_range: Vec<_> = circuit_ids.iter().filter(|&id| !expected_range.contains(id)).collect();
+                    assert!(not_in_range.is_empty(), "Aggregation round 2 should only contain circuit ID 2. Ids out of range: {:?}", not_in_range);
+                }
+                3 => {
+                    let expected_range = vec![1];
+                    let missing_ids: Vec<_> = expected_range.iter().filter(|id| !circuit_ids.contains(id)).collect();
+                    assert!(missing_ids.is_empty(), "Circuit IDs for round {} are missing: {:?}", round, missing_ids);
+                    assert_eq!(circuit_ids.len(), unique_circuit_ids.len(), "Circuit IDs: {:?} should be unique for round {}.", duplicates, round);
+                    let not_in_range: Vec<_> = circuit_ids.iter().filter(|&id| !expected_range.contains(id)).collect();
+                    assert!(not_in_range.is_empty(), "Aggregation round 3 should only contain circuit ID 1. Ids out of range: {:?}", not_in_range);
+                }
+                _ => {
+                    panic!("Unknown round {}", round)
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn expected_config() -> FriProverGroupConfig {
+        FriProverGroupConfig {
+            group_0: vec![CircuitIdRoundTuple::new(1, 3), CircuitIdRoundTuple::new(2, 2)]
+                .into_iter()
+                .collect::<HashSet<_>>(),
+            group_1: vec![CircuitIdRoundTuple::new(1, 0)].into_iter().collect::<HashSet<_>>(),
+            group_2: vec![
+                CircuitIdRoundTuple::new(2, 0),
+                CircuitIdRoundTuple::new(4, 0),
+                CircuitIdRoundTuple::new(6, 0),
+                CircuitIdRoundTuple::new(9, 0),
+            ].into_iter().collect::<HashSet<_>>(),
+            group_3: vec![CircuitIdRoundTuple::new(3, 0)].into_iter().collect::<HashSet<_>>(),
+            group_4: vec![
+                CircuitIdRoundTuple::new(11, 0),
+                CircuitIdRoundTuple::new(12, 0),
+                CircuitIdRoundTuple::new(13, 0),
+            ].into_iter().collect::<HashSet<_>>(),
+            group_5: vec![CircuitIdRoundTuple::new(5, 0)].into_iter().collect::<HashSet<_>>(),
+            group_6: vec![CircuitIdRoundTuple::new(3, 1)].into_iter().collect::<HashSet<_>>(),
+            group_7: vec![CircuitIdRoundTuple::new(7, 0)].into_iter().collect::<HashSet<_>>(),
+            group_8: vec![CircuitIdRoundTuple::new(8, 0)].into_iter().collect::<HashSet<_>>(),
+            group_9: vec![
+                CircuitIdRoundTuple::new(12, 1),
+                CircuitIdRoundTuple::new(13, 1),
+                CircuitIdRoundTuple::new(14, 1),
+                CircuitIdRoundTuple::new(15, 1),
+            ].into_iter().collect::<HashSet<_>>(),
+            group_10: vec![CircuitIdRoundTuple::new(10, 0)].into_iter().collect::<HashSet<_>>(),
+            group_11: vec![
+                CircuitIdRoundTuple::new(7, 1),
+                CircuitIdRoundTuple::new(8, 1),
+                CircuitIdRoundTuple::new(10, 1),
+                CircuitIdRoundTuple::new(11, 1),
+            ].into_iter().collect::<HashSet<_>>(),
+            group_12: vec![
+                CircuitIdRoundTuple::new(4, 1),
+                CircuitIdRoundTuple::new(5, 1),
+                CircuitIdRoundTuple::new(6, 1),
+                CircuitIdRoundTuple::new(9, 1),
+            ].into_iter().collect::<HashSet<_>>(),
+        }
+    }
+
+    #[test]
+    fn from_env() {
+        let groups = [
+            ("FRI_PROVER_GROUP_GROUP_0_0", CircuitIdRoundTuple::new(1, 3)),
+            ("FRI_PROVER_GROUP_GROUP_0_1", CircuitIdRoundTuple::new(2, 2)),
+            ("FRI_PROVER_GROUP_GROUP_1_0", CircuitIdRoundTuple::new(1, 0)),
+            ("FRI_PROVER_GROUP_GROUP_2_0", CircuitIdRoundTuple::new(2, 0)),
+            ("FRI_PROVER_GROUP_GROUP_2_1", CircuitIdRoundTuple::new(4, 0)),
+            ("FRI_PROVER_GROUP_GROUP_2_2", CircuitIdRoundTuple::new(6, 0)),
+            ("FRI_PROVER_GROUP_GROUP_2_3", CircuitIdRoundTuple::new(9, 0)),
+            ("FRI_PROVER_GROUP_GROUP_3_0", CircuitIdRoundTuple::new(3, 0)),
+            ("FRI_PROVER_GROUP_GROUP_4_0", CircuitIdRoundTuple::new(11, 0)),
+            ("FRI_PROVER_GROUP_GROUP_4_1", CircuitIdRoundTuple::new(12, 0)),
+            ("FRI_PROVER_GROUP_GROUP_4_2", CircuitIdRoundTuple::new(13, 0)),
+            ("FRI_PROVER_GROUP_GROUP_5_0", CircuitIdRoundTuple::new(5, 0)),
+            ("FRI_PROVER_GROUP_GROUP_6_0", CircuitIdRoundTuple::new(3, 1)),
+            ("FRI_PROVER_GROUP_GROUP_7_0", CircuitIdRoundTuple::new(7, 0)),
+            ("FRI_PROVER_GROUP_GROUP_8_0", CircuitIdRoundTuple::new(8, 0)),
+            ("FRI_PROVER_GROUP_GROUP_9_0", CircuitIdRoundTuple::new(12, 1)),
+            ("FRI_PROVER_GROUP_GROUP_9_1", CircuitIdRoundTuple::new(13, 1)),
+            ("FRI_PROVER_GROUP_GROUP_9_2", CircuitIdRoundTuple::new(14, 1)),
+            ("FRI_PROVER_GROUP_GROUP_9_3", CircuitIdRoundTuple::new(15, 1)),
+            ("FRI_PROVER_GROUP_GROUP_10_0", CircuitIdRoundTuple::new(10, 0)),
+            ("FRI_PROVER_GROUP_GROUP_11_0", CircuitIdRoundTuple::new(7, 1)),
+            ("FRI_PROVER_GROUP_GROUP_11_1", CircuitIdRoundTuple::new(8, 1)),
+            ("FRI_PROVER_GROUP_GROUP_11_2", CircuitIdRoundTuple::new(10, 1)),
"FRI_PROVER_GROUP_GROUP_11_3", + CircuitIdRoundTuple::new(11, 1), + ), + ( + "FRI_PROVER_GROUP_GROUP_12_0", + CircuitIdRoundTuple::new(4, 1), + ), + ( + "FRI_PROVER_GROUP_GROUP_12_1", + CircuitIdRoundTuple::new(5, 1), + ), + ( + "FRI_PROVER_GROUP_GROUP_12_2", + CircuitIdRoundTuple::new(6, 1), + ), + ( + "FRI_PROVER_GROUP_GROUP_12_3", + CircuitIdRoundTuple::new(9, 1), + ), + ]; + + for (key_base, circuit_round_tuple) in &groups { + let circuit_id_key = format!("{}_CIRCUIT_ID", key_base); + let aggregation_round_key = format!("{}_AGGREGATION_ROUND", key_base); + env::set_var(&circuit_id_key, circuit_round_tuple.circuit_id.to_string()); + env::set_var( + &aggregation_round_key, + circuit_round_tuple.aggregation_round.to_string(), + ); + } + + let actual = FriProverGroupConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/fri_witness_generator.rs b/core/lib/config/src/configs/fri_witness_generator.rs new file mode 100644 index 000000000000..bfb9e2e8b481 --- /dev/null +++ b/core/lib/config/src/configs/fri_witness_generator.rs @@ -0,0 +1,73 @@ +use std::time::Duration; + +// Built-in uses +// External uses +use serde::Deserialize; + +// Local uses +use super::envy_load; + +/// Configuration for the fri witness generation +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct FriWitnessGeneratorConfig { + /// Max time for witness to be generated + pub generation_timeout_in_secs: u16, + /// Max attempts for generating witness + pub max_attempts: u32, + // Percentage of the blocks that gets proven in the range [0.0, 1.0] + // when 0.0 implies all blocks are skipped and 1.0 implies all blocks are proven. + pub blocks_proving_percentage: Option, + pub dump_arguments_for_blocks: Vec, + // Optional l1 batch number to process block until(inclusive). + // This parameter is used in case of performing circuit upgrades(VK/Setup keys), + // to not let witness-generator pick new job and finish all the existing jobs with old circuit. + pub last_l1_batch_to_process: Option, + // Force process block with specified number when sampling is enabled. 
+ pub force_process_block: Option, +} + +impl FriWitnessGeneratorConfig { + pub fn from_env() -> Self { + envy_load("fri_witness", "FRI_WITNESS_") + } + + pub fn witness_generation_timeout(&self) -> Duration { + Duration::from_secs(self.generation_timeout_in_secs as u64) + } + + pub fn last_l1_batch_to_process(&self) -> u32 { + self.last_l1_batch_to_process.unwrap_or(u32::MAX) + } +} + +#[cfg(test)] +mod tests { + use crate::configs::test_utils::set_env; + + use super::*; + + fn expected_config() -> FriWitnessGeneratorConfig { + FriWitnessGeneratorConfig { + generation_timeout_in_secs: 900u16, + max_attempts: 4, + blocks_proving_percentage: Some(30), + dump_arguments_for_blocks: vec![2, 3], + last_l1_batch_to_process: None, + force_process_block: Some(1), + } + } + + #[test] + fn from_env() { + let config = r#" + FRI_WITNESS_GENERATION_TIMEOUT_IN_SECS=900 + FRI_WITNESS_MAX_ATTEMPTS=4 + FRI_WITNESS_DUMP_ARGUMENTS_FOR_BLOCKS="2,3" + FRI_WITNESS_BLOCKS_PROVING_PERCENTAGE="30" + FRI_WITNESS_FORCE_PROCESS_BLOCK="1" + "#; + set_env(config); + let actual = FriWitnessGeneratorConfig::from_env(); + assert_eq!(actual, expected_config()); + } +} diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index fb80311ddd7c..f54d4f84703c 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -1,6 +1,6 @@ use serde::Deserialize; -use crate::envy_load; +use super::envy_load; /// Configuration for the house keeper. #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -12,11 +12,16 @@ pub struct HouseKeeperConfig { pub prover_stats_reporting_interval_ms: u64, pub witness_job_moving_interval_ms: u64, pub witness_generator_stats_reporting_interval_ms: u64, + pub fri_witness_job_moving_interval_ms: u64, + pub fri_prover_job_retrying_interval_ms: u64, + pub fri_witness_generator_job_retrying_interval_ms: u64, + pub prover_db_pool_size: u32, + pub fri_prover_stats_reporting_interval_ms: u64, } impl HouseKeeperConfig { pub fn from_env() -> Self { - envy_load!("house_keeper", "HOUSE_KEEPER_") + envy_load("house_keeper", "HOUSE_KEEPER_") } } @@ -35,6 +40,11 @@ mod tests { prover_stats_reporting_interval_ms: 5_000, witness_job_moving_interval_ms: 30_000, witness_generator_stats_reporting_interval_ms: 10_000, + fri_witness_job_moving_interval_ms: 40_000, + fri_prover_job_retrying_interval_ms: 30_000, + fri_witness_generator_job_retrying_interval_ms: 30_000, + prover_db_pool_size: 2, + fri_prover_stats_reporting_interval_ms: 30_000, } } @@ -48,6 +58,11 @@ HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="300000" HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" +HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS="40000" +HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS="30000" +HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" +HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" +HOUSE_KEEPER_FRI_PROVER_STATS_REPORTING_INTERVAL_MS="30000" "#; set_env(config); let actual = HouseKeeperConfig::from_env(); diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 52889cf5f33d..0ac90de3020f 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -1,13 +1,19 @@ // Public re-exports pub use self::{ - api::ApiConfig, chain::ChainConfig, circuit_synthesizer::CircuitSynthesizerConfig, - contract_verifier::ContractVerifierConfig, 
-    api::ApiConfig, chain::ChainConfig, circuit_synthesizer::CircuitSynthesizerConfig,
-    contract_verifier::ContractVerifierConfig, contracts::ContractsConfig, database::DBConfig,
-    eth_client::ETHClientConfig, eth_sender::ETHSenderConfig, eth_sender::GasAdjusterConfig,
-    eth_watch::ETHWatchConfig, fetcher::FetcherConfig, nfs::NfsConfig,
+    alerts::AlertsConfig, api::ApiConfig, chain::ChainConfig,
+    circuit_synthesizer::CircuitSynthesizerConfig, contract_verifier::ContractVerifierConfig,
+    contracts::ContractsConfig, database::DBConfig, eth_client::ETHClientConfig,
+    eth_sender::ETHSenderConfig, eth_sender::GasAdjusterConfig, eth_watch::ETHWatchConfig,
+    fetcher::FetcherConfig, fri_prover::FriProverConfig,
+    fri_witness_generator::FriWitnessGeneratorConfig, nfs::NfsConfig,
    object_store::ObjectStoreConfig, prover::ProverConfig, prover::ProverConfigs,
-    prover_group::ProverGroupConfig, utils::Prometheus, witness_generator::WitnessGeneratorConfig,
+    prover_group::ProverGroupConfig, utils::PrometheusConfig,
+    witness_generator::WitnessGeneratorConfig,
};
+use serde::de::DeserializeOwned;
+
+pub mod alerts;
pub mod api;
pub mod chain;
pub mod circuit_synthesizer;
@@ -18,6 +24,9 @@ pub mod eth_client;
pub mod eth_sender;
pub mod eth_watch;
pub mod fetcher;
+pub mod fri_prover;
+pub mod fri_prover_group;
+pub mod fri_witness_generator;
pub mod house_keeper;
pub mod nfs;
pub mod object_store;
@@ -29,16 +38,13 @@ pub mod witness_generator;
#[cfg(test)]
pub(crate) mod test_utils;

-/// Convenience macro that loads the structure from the environment variable given the prefix.
-///
-/// # Panics
-///
+/// Convenience function that loads the structure from the environment variables with the given prefix.
/// Panics if the config cannot be loaded from the environment variables.
-#[macro_export]
-macro_rules! envy_load {
-    ($name:expr, $prefix:expr) => {
-        envy::prefixed($prefix)
-            .from_env()
-            .unwrap_or_else(|err| panic!("Cannot load config <{}>: {}", $name, err))
-    };
+pub fn envy_load<T: DeserializeOwned>(name: &str, prefix: &str) -> T {
+    envy_try_load(prefix).unwrap_or_else(|err| panic!("Cannot load config <{}>: {}", name, err))
+}
+
+/// Convenience function that loads the structure from the environment variables with the given prefix.
+pub fn envy_try_load<T: DeserializeOwned>(prefix: &str) -> Result<T, envy::Error> {
+    envy::prefixed(prefix).from_env()
+}
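// A minimal usage sketch (editorial, not part of the diff): any struct implementing
// serde's `Deserialize` can now be loaded through the plain function instead of the
// old macro, with the target type inferred from the call site. `ExampleConfig` below
// is hypothetical:
//
//     #[derive(serde::Deserialize)]
//     struct ExampleConfig { port: u16 }
//     let cfg: ExampleConfig = envy_load("example", "EXAMPLE_"); // reads EXAMPLE_PORT
//
// envy_load panics on failure, while envy_try_load returns the envy::Error instead.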
diff --git a/core/lib/config/src/configs/nfs.rs b/core/lib/config/src/configs/nfs.rs
index 9232f96d5c76..a01a870ef88d 100644
--- a/core/lib/config/src/configs/nfs.rs
+++ b/core/lib/config/src/configs/nfs.rs
@@ -1,4 +1,4 @@
-use crate::envy_load;
+use super::envy_load;
use serde::Deserialize;

/// Configuration for the Network file system.
#[derive(Debug, Deserialize, Clone, PartialEq)]
@@ -8,7 +8,7 @@ pub struct NfsConfig {
impl NfsConfig {
    pub fn from_env() -> Self {
-        envy_load!("nfs", "NFS_")
+        envy_load("nfs", "NFS_")
    }
}
diff --git a/core/lib/config/src/configs/object_store.rs b/core/lib/config/src/configs/object_store.rs
index c1bf36f87eb3..afca2cf7fb34 100644
--- a/core/lib/config/src/configs/object_store.rs
+++ b/core/lib/config/src/configs/object_store.rs
@@ -1,4 +1,4 @@
-use crate::envy_load;
+use super::envy_load;
use serde::Deserialize;

#[derive(Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
@@ -20,7 +20,11 @@ pub struct ObjectStoreConfig {
impl ObjectStoreConfig {
    pub fn from_env() -> Self {
-        envy_load!("object_store", "OBJECT_STORE_")
+        envy_load("object_store", "OBJECT_STORE_")
+    }
+
+    pub fn public_from_env() -> Self {
+        envy_load("public_object_store", "PUBLIC_OBJECT_STORE_")
    }
}
@@ -29,9 +33,9 @@ mod tests {
    use super::*;
    use crate::configs::test_utils::set_env;

-    fn expected_config() -> ObjectStoreConfig {
+    fn expected_config(bucket_base_url: &str) -> ObjectStoreConfig {
        ObjectStoreConfig {
-            bucket_base_url: "/base/url".to_string(),
+            bucket_base_url: bucket_base_url.to_string(),
            mode: ObjectStoreMode::FileBacked,
            file_backed_base_path: "artifacts".to_string(),
            gcs_credential_file_path: "/path/to/credentials.json".to_string(),
@@ -50,6 +54,20 @@
OBJECT_STORE_MAX_RETRIES="5"
"#;
        set_env(config);
        let actual = ObjectStoreConfig::from_env();
-        assert_eq!(actual, expected_config());
+        assert_eq!(actual, expected_config("/base/url"));
+    }
+
+    #[test]
+    fn public_bucket_config_from_env() {
+        let config = r#"
+PUBLIC_OBJECT_STORE_BUCKET_BASE_URL="/public_base_url"
+PUBLIC_OBJECT_STORE_MODE="FileBacked"
+PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts"
+PUBLIC_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json"
+PUBLIC_OBJECT_STORE_MAX_RETRIES="5"
+        "#;
+        set_env(config);
+        let actual = ObjectStoreConfig::public_from_env();
+        assert_eq!(actual, expected_config("/public_base_url"));
    }
}
diff --git a/core/lib/config/src/configs/prover.rs b/core/lib/config/src/configs/prover.rs
index 7b49823541eb..38e038839d97 100644
--- a/core/lib/config/src/configs/prover.rs
+++ b/core/lib/config/src/configs/prover.rs
@@ -5,7 +5,7 @@ use std::time::Duration;
use serde::Deserialize;
// Local uses
-use crate::envy_load;
+use super::envy_load;

/// Configuration for the prover application
#[derive(Debug, Deserialize, Clone, PartialEq)]
@@ -68,22 +68,19 @@ impl ProverConfig {
impl ProverConfigs {
    pub fn from_env() -> Self {
        Self {
-            non_gpu: envy_load!("non_gpu", "PROVER_NON_GPU_"),
-            two_gpu_forty_gb_mem: envy_load!(
-                "two_gpu_forty_gb_mem",
-                "PROVER_TWO_GPU_FORTY_GB_MEM_"
-            ),
-            one_gpu_eighty_gb_mem: envy_load!(
+            non_gpu: envy_load("non_gpu", "PROVER_NON_GPU_"),
+            two_gpu_forty_gb_mem: envy_load("two_gpu_forty_gb_mem", "PROVER_TWO_GPU_FORTY_GB_MEM_"),
+            one_gpu_eighty_gb_mem: envy_load(
                "one_gpu_eighty_gb_mem",
-                "PROVER_ONE_GPU_EIGHTY_GB_MEM_"
+                "PROVER_ONE_GPU_EIGHTY_GB_MEM_",
            ),
-            two_gpu_eighty_gb_mem: envy_load!(
+            two_gpu_eighty_gb_mem: envy_load(
                "two_gpu_eighty_gb_mem",
-                "PROVER_TWO_GPU_EIGHTY_GB_MEM_"
+                "PROVER_TWO_GPU_EIGHTY_GB_MEM_",
            ),
-            four_gpu_eighty_gb_mem: envy_load!(
+            four_gpu_eighty_gb_mem: envy_load(
                "four_gpu_eighty_gb_mem",
-                "PROVER_FOUR_GPU_EIGHTY_GB_MEM_"
+                "PROVER_FOUR_GPU_EIGHTY_GB_MEM_",
            ),
        }
    }
diff --git a/core/lib/config/src/configs/prover_group.rs b/core/lib/config/src/configs/prover_group.rs
index 8e10b83f9769..4e1d402fd550 100644
--- a/core/lib/config/src/configs/prover_group.rs
+++ b/core/lib/config/src/configs/prover_group.rs
@@ -1,6 +1,6 @@
use serde::Deserialize;
-use crate::envy_load;
+use super::envy_load;

/// Configuration for the grouping of specialized provers.
/// This config would be used by circuit-synthesizer and provers.
@@ -27,7 +27,7 @@ pub struct ProverGroupConfig {
impl ProverGroupConfig {
    pub fn from_env() -> Self {
-        envy_load!("prover_group", "PROVER_GROUP_")
+        envy_load("prover_group", "PROVER_GROUP_")
    }

    pub fn get_circuit_ids_for_group_id(&self, group_id: u8) -> Option<Vec<u8>> {
diff --git a/core/lib/config/src/configs/utils.rs b/core/lib/config/src/configs/utils.rs
index 78711201328f..2d091dc59b0c 100644
--- a/core/lib/config/src/configs/utils.rs
+++ b/core/lib/config/src/configs/utils.rs
@@ -1,8 +1,9 @@
+use crate::configs::envy_load;
use serde::Deserialize;
use std::time::Duration;

#[derive(Debug, Deserialize, Clone, PartialEq)]
-pub struct Prometheus {
+pub struct PrometheusConfig {
    /// Port to which the Prometheus exporter server is listening.
    pub listener_port: u16,
    /// Url of Pushgateway.
@@ -11,7 +12,10 @@
    pub push_interval_ms: Option<u64>,
}

-impl Prometheus {
+impl PrometheusConfig {
+    pub fn from_env() -> Self {
+        envy_load("prometheus", "API_PROMETHEUS_")
+    }
    pub fn push_interval(&self) -> Duration {
        Duration::from_millis(self.push_interval_ms.unwrap_or(100))
    }
diff --git a/core/lib/config/src/configs/witness_generator.rs b/core/lib/config/src/configs/witness_generator.rs
index 3e5a17ca886c..b44b925a9b4b 100644
--- a/core/lib/config/src/configs/witness_generator.rs
+++ b/core/lib/config/src/configs/witness_generator.rs
@@ -5,7 +5,7 @@ use std::time::Duration;
use serde::Deserialize;
// Local uses
-use crate::envy_load;
+use super::envy_load;

/// Configuration for the witness generation
#[derive(Debug, Deserialize, Clone, PartialEq)]
@@ -30,7 +30,7 @@ pub struct WitnessGeneratorConfig {
impl WitnessGeneratorConfig {
    pub fn from_env() -> Self {
-        envy_load!("witness", "WITNESS_")
+        envy_load("witness", "WITNESS_")
    }

    pub fn witness_generation_timeout(&self) -> Duration {
diff --git a/core/lib/config/src/constants/blocks.rs b/core/lib/config/src/constants/blocks.rs
index d48d3007bc7c..7579b408f0c0 100644
--- a/core/lib/config/src/constants/blocks.rs
+++ b/core/lib/config/src/constants/blocks.rs
@@ -1,6 +1,6 @@
use zksync_basic_types::H256;

-// Be design we don't have a term: uncle blocks. Hence we have to use rlp hash
+// By design we don't have a term: uncle blocks. Hence we have to use rlp hash
// from empty list for ethereum compatibility.
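// (Editorial note: this constant is keccak256 of the RLP encoding of an empty list,
// i.e. keccak256 of the single byte 0xc0, which yields the standard Ethereum
// empty-ommers hash 0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347.)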
pub const EMPTY_UNCLES_HASH: H256 = H256([ 0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 8514f9614c50..5c31df1ecadf 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,7 +1,5 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use serde::Deserialize; - pub use crate::configs::{ ApiConfig, ChainConfig, ContractVerifierConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, ETHWatchConfig, FetcherConfig, GasAdjusterConfig, ObjectStoreConfig, @@ -11,38 +9,3 @@ pub use crate::configs::{ pub mod configs; pub mod constants; pub mod test_config; - -#[derive(Debug, Deserialize, Clone, PartialEq)] -pub struct ZkSyncConfig { - pub api: ApiConfig, - pub chain: ChainConfig, - pub contracts: ContractsConfig, - pub db: DBConfig, - pub eth_client: ETHClientConfig, - pub eth_sender: ETHSenderConfig, - pub eth_watch: ETHWatchConfig, - pub fetcher: FetcherConfig, - pub prover: ProverConfigs, - pub object_store: ObjectStoreConfig, -} - -impl ZkSyncConfig { - pub fn from_env() -> Self { - Self { - api: ApiConfig::from_env(), - chain: ChainConfig::from_env(), - contracts: ContractsConfig::from_env(), - db: DBConfig::from_env(), - eth_client: ETHClientConfig::from_env(), - eth_sender: ETHSenderConfig::from_env(), - eth_watch: ETHWatchConfig::from_env(), - fetcher: FetcherConfig::from_env(), - prover: ProverConfigs::from_env(), - object_store: ObjectStoreConfig::from_env(), - } - } - - pub fn default_db() -> DBConfig { - DBConfig::default() - } -} diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml index 61cde99c42bf..cd15964c9345 100644 --- a/core/lib/contracts/Cargo.toml +++ b/core/lib/contracts/Cargo.toml @@ -5,14 +5,15 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] +zksync_utils = { path = "../utils", version = "1.0" } + ethabi = "16.0.0" serde_json = "1.0" serde = "1.0" -zksync_utils = { path = "../utils", version = "1.0" } once_cell = "1.7" hex = "0.4" diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a632bee74d26..c7a7b5d68314 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -1,14 +1,16 @@ #![allow(clippy::derive_partial_eq_without_eq)] - -use ethabi::ethereum_types::{H256, U256}; -use ethabi::Contract; +use ethabi::{ + ethereum_types::{H256, U256}, + Contract, +}; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; +use std::{ + fs::{self, File}, + path::Path, +}; -use std::fs::{self, File}; -use std::path::Path; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::bytes_to_be_words; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; #[derive(Debug)] pub enum ContractLanguage { @@ -242,12 +244,11 @@ pub static ESTIMATE_FEE_BLOCK_CODE: Lazy = Lazy::new(|| { }); impl BaseSystemContracts { - pub fn load_from_disk() -> Self { - let bytecode = read_proved_block_bootloader_bytecode(); - let hash = hash_bytecode(&bytecode); + fn load_with_bootloader(bootloader_bytecode: Vec) -> Self { + let hash = hash_bytecode(&bootloader_bytecode); let bootloader = SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytes_to_be_words(bootloader_bytecode), hash, }; @@ 
-264,6 +265,17 @@ impl BaseSystemContracts { default_aa, } } + // BaseSystemContracts with proved bootloader - for handling transactions. + pub fn load_from_disk() -> Self { + let bootloader_bytecode = read_proved_block_bootloader_bytecode(); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + + /// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'. + pub fn playground() -> Self { + let bootloader_bytecode = read_playground_block_bootloader_bytecode(); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { diff --git a/core/lib/crypto/Cargo.toml b/core/lib/crypto/Cargo.toml index c012d11ab3b2..f79058d3a000 100644 --- a/core/lib/crypto/Cargo.toml +++ b/core/lib/crypto/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] readme = "README.md" diff --git a/core/lib/crypto/src/hasher/blake2.rs b/core/lib/crypto/src/hasher/blake2.rs index 3bf9e3a794b4..70d8c9797e8e 100644 --- a/core/lib/crypto/src/hasher/blake2.rs +++ b/core/lib/crypto/src/hasher/blake2.rs @@ -6,26 +6,12 @@ use zksync_basic_types::H256; #[derive(Default, Clone, Debug)] pub struct Blake2Hasher; -impl Hasher> for Blake2Hasher { - /// Gets the hash of the byte sequence. - fn hash_bytes>(&self, value: I) -> Vec { - >::hash_bytes(self, value).0.into() - } - - /// Merges two hashes into one. - fn compress(&self, lhs: &Vec, rhs: &Vec) -> Vec { - let mut hasher = Blake2s256::new(); - hasher.update(lhs); - hasher.update(rhs); - hasher.finalize().to_vec() - } -} +impl Hasher for Blake2Hasher { + type Hash = H256; -impl Hasher for Blake2Hasher { - fn hash_bytes>(&self, value: I) -> H256 { + fn hash_bytes(&self, value: &[u8]) -> H256 { let mut hasher = Blake2s256::new(); - let value: Vec = value.into_iter().collect(); - hasher.update(&value); + hasher.update(value); H256(hasher.finalize().into()) } diff --git a/core/lib/crypto/src/hasher/keccak.rs b/core/lib/crypto/src/hasher/keccak.rs index f3191cf1c03e..e4c441328de9 100644 --- a/core/lib/crypto/src/hasher/keccak.rs +++ b/core/lib/crypto/src/hasher/keccak.rs @@ -1,28 +1,20 @@ use crate::hasher::Hasher; -use zksync_basic_types::web3::signing::keccak256; +use zksync_basic_types::{web3::signing::keccak256, H256}; #[derive(Default, Clone, Debug)] pub struct KeccakHasher; -impl Hasher> for KeccakHasher { - /// Gets the hash of the byte sequence. - fn hash_bytes>(&self, value: I) -> Vec { - let value: Vec = value.into_iter().collect(); - keccak256(&value).to_vec() - } +impl Hasher for KeccakHasher { + type Hash = H256; - /// Get the hash of the hashes sequence. - fn hash_elements>>(&self, elements: I) -> Vec { - let elems: Vec = elements.into_iter().flatten().collect(); - keccak256(&elems).to_vec() + fn hash_bytes(&self, value: &[u8]) -> Self::Hash { + H256(keccak256(value)) } - /// Merges two hashes into one. 
- fn compress(&self, lhs: &Vec, rhs: &Vec) -> Vec { - let mut elems = vec![]; - elems.extend(lhs); - elems.extend(rhs); - - keccak256(&elems).to_vec() + fn compress(&self, lhs: &Self::Hash, rhs: &Self::Hash) -> Self::Hash { + let mut bytes = [0_u8; 64]; + bytes[..32].copy_from_slice(&lhs.0); + bytes[32..].copy_from_slice(&rhs.0); + H256(keccak256(&bytes)) } } diff --git a/core/lib/crypto/src/hasher/mod.rs b/core/lib/crypto/src/hasher/mod.rs index 67fbf569c5f1..857d489a9d4c 100644 --- a/core/lib/crypto/src/hasher/mod.rs +++ b/core/lib/crypto/src/hasher/mod.rs @@ -3,28 +3,12 @@ pub mod keccak; pub mod sha256; /// Definition of hasher suitable for calculating state hash. -/// -/// # Panics -/// -/// This structure expects input data to be correct, as it's main usage is the Merkle tree maintenance, -/// which assumes the consistent state. -/// It means that caller is responsible for checking that input values are actually valid, e.g. for `Vec` -/// it must be checked that byte sequence can be deserialized to hash object expected by the chosen hasher -/// implementation. -/// -/// What it *actually* means, that is incorrect input data will cause the code to panic. -pub trait Hasher { - /// Gets the hash of the byte sequence. - fn hash_bytes>(&self, value: I) -> Hash; +pub trait Hasher { + type Hash: AsRef<[u8]>; - /// Get the hash of the hashes sequence. - fn hash_elements>(&self, elements: I) -> Hash - where - Hash: IntoIterator, - { - self.hash_bytes(elements.into_iter().flatten()) - } + /// Gets the hash of the byte sequence. + fn hash_bytes(&self, value: &[u8]) -> Self::Hash; /// Merges two hashes into one. - fn compress(&self, lhs: &Hash, rhs: &Hash) -> Hash; + fn compress(&self, lhs: &Self::Hash, rhs: &Self::Hash) -> Self::Hash; } diff --git a/core/lib/crypto/src/hasher/sha256.rs b/core/lib/crypto/src/hasher/sha256.rs index ef42ecc632c5..73e593ead72e 100644 --- a/core/lib/crypto/src/hasher/sha256.rs +++ b/core/lib/crypto/src/hasher/sha256.rs @@ -1,36 +1,24 @@ -use crate::hasher::Hasher; use sha2::{Digest, Sha256}; -#[derive(Default, Clone, Debug)] -pub struct Sha256Hasher; - -impl Hasher> for Sha256Hasher { - /// Gets the hash of the byte sequence. - fn hash_bytes>(&self, value: I) -> Vec { - let mut sha256 = Sha256::new(); - let value: Vec = value.into_iter().collect(); - sha256.update(&value); +use crate::hasher::Hasher; +use zksync_basic_types::H256; - sha256.finalize().to_vec() - } +#[derive(Debug, Default, Clone, Copy)] +pub struct Sha256Hasher; - /// Get the hash of the hashes sequence. - fn hash_elements>>(&self, elements: I) -> Vec { - let elems: Vec = elements.into_iter().flatten().collect(); +impl Hasher for Sha256Hasher { + type Hash = H256; + fn hash_bytes(&self, value: &[u8]) -> Self::Hash { let mut sha256 = Sha256::new(); - sha256.update(&elems); - sha256.finalize().to_vec() + sha256.update(value); + H256(sha256.finalize().into()) } - /// Merges two hashes into one. 
- fn compress(&self, lhs: &Vec, rhs: &Vec) -> Vec { - let mut elems = vec![]; - elems.extend(lhs); - elems.extend(rhs); - - let mut sha256 = Sha256::new(); - sha256.update(&elems); - sha256.finalize().to_vec() + fn compress(&self, lhs: &Self::Hash, rhs: &Self::Hash) -> Self::Hash { + let mut hasher = Sha256::new(); + hasher.update(lhs.as_ref()); + hasher.update(rhs.as_ref()); + H256(hasher.finalize().into()) } } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 4310de58e2b2..2aa15d70fa99 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -5,29 +5,25 @@ edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] vlog = { path = "../../lib/vlog", version = "1.0" } -vm = { path = "../vm", version = "0.1.0" } zksync_utils = { path = "../utils", version = "1.0" } zksync_config = { path = "../config", version = "1.0" } zksync_contracts = { path = "../contracts", version = "1.0" } zksync_types = { path = "../types", version = "1.0" } -zksync_state = { path = "../state", version = "1.0" } -zksync_storage = { path = "../storage", version = "1.0" } -zksync_web3_decl = { path = "../web3_decl", version = "1.0" } zksync_health_check = { path = "../health_check", version = "0.1.0" } itertools = "0.10.1" thiserror = "1.0" anyhow = "1.0" metrics = "0.20" -async-std = "1.12.0" +tokio = { version = "1", features = ["time"] } sqlx = { version = "0.5", default-features = false, features = [ - "runtime-async-std-native-tls", + "runtime-tokio-native-tls", "macros", "postgres", "bigdecimal", @@ -43,8 +39,7 @@ bincode = "1" num = "0.3.1" hex = "0.4" once_cell = "1.7" - +strum = { version = "0.24", features = ["derive"] } [dev-dependencies] db_test_macro = { path = "../db_test_macro", version = "0.1.0" } -tokio = { version = "1", features = ["time"] } #criterion = "0.3.0" diff --git a/core/lib/dal/README.md b/core/lib/dal/README.md index 62dc5e9aa125..d86aa8c655e1 100644 --- a/core/lib/dal/README.md +++ b/core/lib/dal/README.md @@ -9,11 +9,10 @@ Current schema is managed by `diesel` - that applies all the schema changes from ### Storage tables -| Table name | Description | Usage | -| ---------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | -| contract_sources | Mapping from contract address to the contract source. | Currently set via `zks_setContractDebugInfo` JSON call | -| storage | Main storage column: mapping from hashed StorageKey (account + key) to the value. | We also store additional columns there (like transaction hash or creation time). | -| storage_logs | Stores all the storage access logs for all the transactions. | Main source of truth - other columns (like `storage`) are created by compacting this column. Its primary index is (storage key, mini_block, operation_id) | +| Table name | Description | Usage | +| ------------ | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| storage | Main storage column: mapping from hashed StorageKey (account + key) to the value. 
| We also store additional columns there (like transaction hash or creation time). | +| storage_logs | Stores all the storage access logs for all the transactions. | Main source of truth - other columns (like `storage`) are created by compacting this column. Its primary index is (storage key, mini_block, operation_id) | ### Prover queue tables diff --git a/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.down.sql b/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.down.sql new file mode 100644 index 000000000000..d137348049d0 --- /dev/null +++ b/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS witness_inputs_fri; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.up.sql b/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.up.sql new file mode 100644 index 000000000000..24bb5cb1e952 --- /dev/null +++ b/core/lib/dal/migrations/20230607131138_create_witness_inputs_fri_table.up.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS witness_inputs_fri +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + merkle_tree_paths_blob_url TEXT, + attempts SMALLINT NOT NULL DEFAULT 0, + status TEXT NOT NULL, + error TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP, + time_taken TIME, + is_blob_cleaned BOOLEAN + ); diff --git a/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.down.sql new file mode 100644 index 000000000000..a0f9854d3c5b --- /dev/null +++ b/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS prover_jobs_fri; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.up.sql new file mode 100644 index 000000000000..a84261d4fb6d --- /dev/null +++ b/core/lib/dal/migrations/20230607132716_create_prover_jobs_fri_table.up.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS prover_jobs_fri +( + id BIGSERIAL PRIMARY KEY, + l1_batch_number BIGINT NOT NULL REFERENCES l1_batches (number) ON DELETE CASCADE, + circuit_id SMALLINT NOT NULL, + circuit_blob_url TEXT NOT NULL, + aggregation_round SMALLINT NOT NULL, + sequence_number INT NOT NULL, + proof BYTEA, + status TEXT NOT NULL, + error TEXT, + attempts SMALLINT NOT NULL DEFAULT 0, + processing_started_at TIMESTAMP, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + time_taken TIME, + is_blob_cleaned BOOLEAN + ); diff --git a/core/lib/dal/migrations/20230609133146_drop_contract_sources.down.sql b/core/lib/dal/migrations/20230609133146_drop_contract_sources.down.sql new file mode 100644 index 000000000000..626c08426a7e --- /dev/null +++ b/core/lib/dal/migrations/20230609133146_drop_contract_sources.down.sql @@ -0,0 +1,9 @@ +CREATE TABLE IF NOT EXISTS contract_sources + ( + address BYTEA PRIMARY KEY, + assembly_code TEXT NOT NULL, + pc_line_mapping JSONB NOT NULL, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL + ); diff --git a/core/lib/dal/migrations/20230609133146_drop_contract_sources.up.sql b/core/lib/dal/migrations/20230609133146_drop_contract_sources.up.sql new file mode 100644 index 000000000000..2e28b324ba66 --- /dev/null +++ 
b/core/lib/dal/migrations/20230609133146_drop_contract_sources.up.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS contract_sources; diff --git a/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.down.sql b/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.down.sql new file mode 100644 index 000000000000..dd31ef855cce --- /dev/null +++ b/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE witness_inputs ADD CONSTRAINT witness_inputs_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number); \ No newline at end of file diff --git a/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.up.sql b/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.up.sql new file mode 100644 index 000000000000..88118ace45b3 --- /dev/null +++ b/core/lib/dal/migrations/20230612111554_drop_unnecessary_foreign_keys_post_prover_migration.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE witness_inputs DROP CONSTRAINT witness_inputs_l1_batch_number_fkey; + diff --git a/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.down.sql b/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.down.sql new file mode 100644 index 000000000000..4f4ea0c9640f --- /dev/null +++ b/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS storage_logs_contract_address_tx_hash_idx_upd; +CREATE INDEX IF NOT EXISTS storage_logs_contract_address_tx_hash_idx ON storage_logs (address, tx_hash) WHERE (address = '\x0000000000000000000000000000000000008002'::bytea); diff --git a/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.up.sql b/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.up.sql new file mode 100644 index 000000000000..235be5fdf46a --- /dev/null +++ b/core/lib/dal/migrations/20230612173621_fix-storage-logs-contract-address-tx-hash-idx.up.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS storage_logs_contract_address_tx_hash_idx; +CREATE INDEX IF NOT EXISTS storage_logs_contract_address_tx_hash_idx_upd ON storage_logs (tx_hash) WHERE (address = '\x0000000000000000000000000000000000008002'::bytea); diff --git a/core/lib/dal/migrations/20230614081056_add_missing_indices.down.sql b/core/lib/dal/migrations/20230614081056_add_missing_indices.down.sql new file mode 100644 index 000000000000..e26ddd060ba9 --- /dev/null +++ b/core/lib/dal/migrations/20230614081056_add_missing_indices.down.sql @@ -0,0 +1,17 @@ +DROP INDEX IF EXISTS ix_events_t1; +DROP INDEX IF EXISTS ix_initial_writes_t1; +DROP INDEX IF EXISTS ix_miniblocks_t1; + +DROP INDEX IF EXISTS ix_prover_jobs_circuits_0; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_1; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_2; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_3; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_4; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_5; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_6; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_7; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_8; +DROP INDEX IF EXISTS ix_prover_jobs_circuits_9; + +DROP INDEX IF EXISTS ix_prover_jobs_t2; +DROP INDEX IF EXISTS ix_prover_jobs_t3; diff --git a/core/lib/dal/migrations/20230614081056_add_missing_indices.up.sql 
b/core/lib/dal/migrations/20230614081056_add_missing_indices.up.sql new file mode 100644 index 000000000000..6cf71780f85b --- /dev/null +++ b/core/lib/dal/migrations/20230614081056_add_missing_indices.up.sql @@ -0,0 +1,17 @@ +CREATE INDEX IF NOT EXISTS ix_events_t1 ON public.events USING btree (topic1, address, tx_hash); +CREATE INDEX IF NOT EXISTS ix_initial_writes_t1 ON public.initial_writes USING btree (hashed_key) INCLUDE (l1_batch_number); +CREATE INDEX IF NOT EXISTS ix_miniblocks_t1 ON public.miniblocks USING btree (number) INCLUDE (l1_batch_number, "timestamp"); + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_0 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{Scheduler,"L1 messages merklizer"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_1 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Node aggregation","Decommitts sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_2 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Leaf aggregation","Code decommitter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_3 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Log demuxer",Keccak}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_4 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{SHA256,ECRecover}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_5 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"RAM permutation","Storage sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_6 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Storage application","Initial writes pubdata rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_7 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Repeated writes pubdata rehasher","Events sorter"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_8 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"L1 messages sorter","L1 messages rehasher"}'::text[]))); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_circuits_9 ON public.prover_jobs USING btree (aggregation_round DESC, l1_batch_number, id) WHERE ((status = 'queued'::text) AND (circuit_type = ANY ('{"Main VM"}'::text[]))); + +CREATE INDEX IF NOT EXISTS ix_prover_jobs_t2 ON public.prover_jobs USING btree (l1_batch_number, aggregation_round) WHERE ((status = 'successful'::text) OR (aggregation_round < 3)); +CREATE INDEX IF NOT EXISTS ix_prover_jobs_t3 ON public.prover_jobs USING btree (aggregation_round, l1_batch_number) WHERE (status = 'successful'::text); diff --git a/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.down.sql b/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.down.sql 
new file mode 100644 index 000000000000..ffa2061de8b8 --- /dev/null +++ b/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE witness_inputs_fri ADD CONSTRAINT witness_inputs_fri_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) REFERENCES l1_batches (number); \ No newline at end of file diff --git a/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.up.sql b/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.up.sql new file mode 100644 index 000000000000..68e14f6b3c3b --- /dev/null +++ b/core/lib/dal/migrations/20230615142357_drop_l1_batches_constraint_from_witness_inputs_fri.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE witness_inputs_fri DROP CONSTRAINT witness_inputs_fri_l1_batch_number_fkey; + diff --git a/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.down.sql b/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.down.sql new file mode 100644 index 000000000000..95cb2255bb68 --- /dev/null +++ b/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS leaf_aggregation_witness_jobs_fri; diff --git a/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.up.sql b/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.up.sql new file mode 100644 index 000000000000..a0f0fce87dcd --- /dev/null +++ b/core/lib/dal/migrations/20230616131252_add_leaf_aggregation_witness_jobs_fri.up.sql @@ -0,0 +1,16 @@ +-- Add up migration script here +CREATE TABLE IF NOT EXISTS leaf_aggregation_witness_jobs_fri +( + id BIGSERIAL PRIMARY KEY, + l1_batch_number BIGINT NOT NULL, + circuit_id SMALLINT NOT NULL, + closed_form_inputs_blob_url TEXT, + attempts SMALLINT NOT NULL DEFAULT 0, + status TEXT NOT NULL, + error TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + processing_started_at TIMESTAMP, + time_taken TIME, + is_blob_cleaned BOOLEAN +); diff --git a/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.down.sql new file mode 100644 index 000000000000..6b33271c70d3 --- /dev/null +++ b/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS node_aggregation_witness_jobs_fri; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.up.sql new file mode 100644 index 000000000000..5dd235d26794 --- /dev/null +++ b/core/lib/dal/migrations/20230619132736_add_node_aggregation_witness_jobs_fri_table.up.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS node_aggregation_witness_jobs_fri +( + id BIGSERIAL PRIMARY KEY, + l1_batch_number BIGINT NOT NULL, + circuit_id SMALLINT NOT NULL, + depth INT NOT NULL DEFAULT 0, + status TEXT NOT NULL, + attempts SMALLINT NOT NULL DEFAULT 0, + aggregations_url TEXT, + processing_started_at TIMESTAMP, + time_taken TIME, + error TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.down.sql 
b/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.down.sql new file mode 100644 index 000000000000..ee041ca9ee72 --- /dev/null +++ b/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.down.sql @@ -0,0 +1 @@ +ALTER TABLE leaf_aggregation_witness_jobs_fri DROP COLUMN IF EXISTS number_of_basic_circuits; diff --git a/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.up.sql new file mode 100644 index 000000000000..fdee3838c83b --- /dev/null +++ b/core/lib/dal/migrations/20230622100931_add_number_of_basic_circuits_in_leaf_agg_jobs_fri_table.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE leaf_aggregation_witness_jobs_fri ADD COLUMN IF NOT EXISTS number_of_basic_circuits INT; + diff --git a/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.down.sql new file mode 100644 index 000000000000..c3b77fef69cd --- /dev/null +++ b/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.down.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri DROP COLUMN IF EXISTS depth; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.up.sql new file mode 100644 index 000000000000..0ae73d45c4fe --- /dev/null +++ b/core/lib/dal/migrations/20230622142030_add_depth_in_prover_jobs_fri_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS depth INT NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230626060855_vyper_verification.down.sql b/core/lib/dal/migrations/20230626060855_vyper_verification.down.sql new file mode 100644 index 000000000000..cd7888316b53 --- /dev/null +++ b/core/lib/dal/migrations/20230626060855_vyper_verification.down.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS compiler_versions; + +ALTER TABLE contract_verification_requests DROP COLUMN IF EXISTS optimizer_mode; +ALTER TABLE contract_verification_requests RENAME COLUMN compiler_version TO compiler_solc_version; +ALTER TABLE contract_verification_requests RENAME COLUMN zk_compiler_version TO compiler_zksolc_version; + +CREATE TABLE IF NOT EXISTS contract_verification_zksolc_versions ( + version TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); +CREATE TABLE IF NOT EXISTS contract_verification_solc_versions ( + version TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20230626060855_vyper_verification.up.sql b/core/lib/dal/migrations/20230626060855_vyper_verification.up.sql new file mode 100644 index 000000000000..240fec31054c --- /dev/null +++ b/core/lib/dal/migrations/20230626060855_vyper_verification.up.sql @@ -0,0 +1,14 @@ +ALTER TABLE contract_verification_requests RENAME COLUMN compiler_solc_version TO compiler_version; +ALTER TABLE contract_verification_requests RENAME COLUMN compiler_zksolc_version TO zk_compiler_version; +ALTER TABLE contract_verification_requests ADD COLUMN IF NOT EXISTS optimizer_mode TEXT; + +DROP TABLE IF EXISTS contract_verification_zksolc_versions; +DROP TABLE IF EXISTS contract_verification_solc_versions; + +CREATE TABLE IF NOT EXISTS compiler_versions ( + version TEXT 
NOT NULL, + compiler TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + PRIMARY KEY (version, compiler) +); diff --git a/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.down.sql b/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.down.sql new file mode 100644 index 000000000000..13bcda38e22c --- /dev/null +++ b/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.down.sql @@ -0,0 +1,3 @@ +DROP INDEX IF EXISTS prover_jobs_fri_composite_index; +DROP INDEX IF EXISTS leaf_aggregation_witness_jobs_fri_composite_index; +DROP INDEX IF EXISTS node_aggregation_witness_jobs_fri_composite_index; diff --git a/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.up.sql b/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.up.sql new file mode 100644 index 000000000000..559768891d49 --- /dev/null +++ b/core/lib/dal/migrations/20230626103610_create_unique_index_for_leaf_node_prover_fri_tables.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS prover_jobs_fri_composite_index ON prover_jobs_fri(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number); +CREATE UNIQUE INDEX IF NOT EXISTS leaf_aggregation_witness_jobs_fri_composite_index ON leaf_aggregation_witness_jobs_fri(l1_batch_number, circuit_id); +CREATE UNIQUE INDEX IF NOT EXISTS node_aggregation_witness_jobs_fri_composite_index ON node_aggregation_witness_jobs_fri(l1_batch_number, circuit_id, depth); \ No newline at end of file diff --git a/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.down.sql b/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.down.sql new file mode 100644 index 000000000000..83deeca00f2b --- /dev/null +++ b/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS scheduler_dependency_tracker_fri; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.up.sql b/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.up.sql new file mode 100644 index 000000000000..ef2aca34f512 --- /dev/null +++ b/core/lib/dal/migrations/20230627123428_add_scheduler_dependency_tracker_table.up.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS scheduler_dependency_tracker_fri +( + l1_batch_number BIGINT PRIMARY KEY, + status TEXT NOT NULL, + circuit_1_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_2_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_3_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_4_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_5_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_6_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_7_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_8_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_9_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_10_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_11_final_prover_job_id BIGSERIAL 
REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_12_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + circuit_13_final_prover_job_id BIGSERIAL REFERENCES prover_jobs_fri (id) ON DELETE CASCADE, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); \ No newline at end of file diff --git a/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.down.sql new file mode 100644 index 000000000000..0ef01ca7fe5a --- /dev/null +++ b/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS scheduler_witness_jobs_fri; \ No newline at end of file diff --git a/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.up.sql new file mode 100644 index 000000000000..8e7f772925ca --- /dev/null +++ b/core/lib/dal/migrations/20230627131556_create_scheduler_witness_jobs_fri_table.up.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS scheduler_witness_jobs_fri +( + l1_batch_number BIGINT PRIMARY KEY, + scheduler_partial_input_blob_url TEXT NOT NULL, + status TEXT NOT NULL, + processing_started_at TIMESTAMP, + time_taken TIME, + error TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.down.sql new file mode 100644 index 000000000000..90b709e259ff --- /dev/null +++ b/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.down.sql @@ -0,0 +1 @@ +ALTER TABLE scheduler_witness_jobs_fri drop COLUMN IF EXISTS attempts; diff --git a/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.up.sql new file mode 100644 index 000000000000..f57cd4bc970a --- /dev/null +++ b/core/lib/dal/migrations/20230628091834_add_attempts_in_scheduler_witness_jobs_fri_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE scheduler_witness_jobs_fri ADD COLUMN IF NOT EXISTS attempts SMALLINT NOT NULL DEFAULT 0; diff --git a/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.down.sql b/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.down.sql new file mode 100644 index 000000000000..9c81d4d097d2 --- /dev/null +++ b/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.down.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri DROP COLUMN IF EXISTS is_node_final_proof; diff --git a/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.up.sql b/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.up.sql new file mode 100644 index 000000000000..ac0fb75b3566 --- /dev/null +++ b/core/lib/dal/migrations/20230628113801_add_is_node_final_proof_column_in_prover_jobs_fri_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS is_node_final_proof BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.down.sql 
b/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.down.sql new file mode 100644 index 000000000000..47da60bb39ff --- /dev/null +++ b/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.down.sql @@ -0,0 +1 @@ +ALTER TABLE node_aggregation_witness_jobs_fri DROP COLUMN IF EXISTS number_of_dependent_jobs; diff --git a/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.up.sql b/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.up.sql new file mode 100644 index 000000000000..4c1780d54e22 --- /dev/null +++ b/core/lib/dal/migrations/20230628120840_add_number_of_dependency_column_in_node_aggregations_fri_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE node_aggregation_witness_jobs_fri ADD COLUMN IF NOT EXISTS number_of_dependent_jobs INTEGER; diff --git a/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.down.sql b/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.down.sql new file mode 100644 index 000000000000..b83965b49454 --- /dev/null +++ b/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.down.sql @@ -0,0 +1,29 @@ +ALTER TABLE scheduler_dependency_tracker_fri + ALTER COLUMN circuit_1_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_2_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_3_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_4_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_5_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_6_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_7_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_8_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_9_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_10_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_11_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_12_final_prover_job_id SET NOT NULL, + ALTER COLUMN circuit_13_final_prover_job_id SET NOT NULL; + +ALTER TABLE scheduler_dependency_tracker_fri + ADD FOREIGN KEY (circuit_1_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_2_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_3_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_4_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_5_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_6_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_7_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_8_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_9_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_10_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_11_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_12_final_prover_job_id) REFERENCES prover_jobs_fri (id), + ADD FOREIGN KEY (circuit_13_final_prover_job_id) REFERENCES prover_jobs_fri (id); diff --git a/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.up.sql 
b/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.up.sql new file mode 100644 index 000000000000..80fce3aa4d23 --- /dev/null +++ b/core/lib/dal/migrations/20230628184614_update_scheduler_dependency_tracker_fri_to_remove_autoincrement_sequence.up.sql @@ -0,0 +1,43 @@ +ALTER TABLE scheduler_dependency_tracker_fri + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_1_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_2_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_3_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_4_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_5_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_6_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_7_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_8_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_9_final_prover_job_i_fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_10_final_prover_job__fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_11_final_prover_job__fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_12_final_prover_job__fkey, + DROP CONSTRAINT IF EXISTS scheduler_dependency_tracker__circuit_13_final_prover_job__fkey; + +ALTER TABLE scheduler_dependency_tracker_fri + ALTER COLUMN circuit_1_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_2_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_3_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_4_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_5_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_6_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_7_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_8_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_9_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_10_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_11_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_12_final_prover_job_id DROP NOT NULL, + ALTER COLUMN circuit_13_final_prover_job_id DROP NOT NULL; + +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_1_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_2_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_3_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_4_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_5_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_6_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_7_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_8_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_9_final_prover_job_id_seq CASCADE; +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_10_final_prover_job_i_seq CASCADE; -- "_job_i", not "_job_id": Postgres truncates identifiers longer than 63 chars.
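+-- Editor's illustration (not part of the original migration): PostgreSQL caps
+-- identifiers at 63 bytes (NAMEDATALEN - 1) and silently truncates longer
+-- auto-generated names, which is why the sequence names here end in "_job_i_seq"
+-- rather than "_job_id_seq". A quick sanity check, assuming any psql session:
+-- SELECT length('scheduler_dependency_tracker__circuit_10_final_prover_job_i_seq');
+-- -- returns 63, exactly the identifier limit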
+DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_11_final_prover_job_i_seq CASCADE; -- "_job_i", not "_job_id": Postgres truncates identifiers longer than 63 chars. +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_12_final_prover_job_i_seq CASCADE; -- "_job_i", not "_job_id": Postgres truncates identifiers longer than 63 chars. +DROP SEQUENCE IF EXISTS scheduler_dependency_tracker__circuit_13_final_prover_job_i_seq CASCADE; -- "_job_i", not "_job_id": Postgres truncates identifiers longer than 63 chars. \ No newline at end of file diff --git a/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.down.sql b/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.down.sql new file mode 100644 index 000000000000..3eefc802e925 --- /dev/null +++ b/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.down.sql @@ -0,0 +1,9 @@ +DROP INDEX IF EXISTS idx_prover_jobs_fri_queued_order; +DROP INDEX IF EXISTS idx_prover_jobs_fri_status_processing_attempts; +DROP INDEX IF EXISTS idx_witness_inputs_fri_status_processing_attempts; +DROP INDEX IF EXISTS idx_leaf_aggregation_witness_jobs_fri_queued_order; +DROP INDEX IF EXISTS idx_leaf_aggregation_fri_status_processing_attempts; +DROP INDEX IF EXISTS idx_node_aggregation_witness_jobs_fri_queued_order; +DROP INDEX IF EXISTS idx_node_aggregation_fri_status_processing_attempts; +DROP INDEX IF EXISTS idx_scheduler_fri_status_processing_attempts; +DROP INDEX IF EXISTS idx_scheduler_dependency_tracker_fri_circuit_ids_filtered; diff --git a/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.up.sql b/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.up.sql new file mode 100644 index 000000000000..53e5802dac3e --- /dev/null +++ b/core/lib/dal/migrations/20230630083308_add_indices_for_new_prover_related_tables.up.sql @@ -0,0 +1,52 @@ +CREATE INDEX IF NOT EXISTS idx_prover_jobs_fri_queued_order + ON prover_jobs_fri (aggregation_round DESC, l1_batch_number ASC, id ASC) + WHERE status = 'queued'; + +CREATE INDEX IF NOT EXISTS idx_prover_jobs_fri_status_processing_attempts + ON prover_jobs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); + + +CREATE INDEX IF NOT EXISTS idx_witness_inputs_fri_status_processing_attempts + ON witness_inputs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); + + +CREATE INDEX IF NOT EXISTS idx_leaf_aggregation_witness_jobs_fri_queued_order + ON leaf_aggregation_witness_jobs_fri (l1_batch_number ASC, id ASC) + WHERE status = 'queued'; + +CREATE INDEX IF NOT EXISTS idx_leaf_aggregation_fri_status_processing_attempts + ON leaf_aggregation_witness_jobs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); + + +CREATE INDEX IF NOT EXISTS idx_node_aggregation_witness_jobs_fri_queued_order + ON node_aggregation_witness_jobs_fri (l1_batch_number ASC, depth ASC, id ASC) + WHERE status = 'queued'; + +CREATE INDEX IF NOT EXISTS idx_node_aggregation_fri_status_processing_attempts + ON node_aggregation_witness_jobs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); + +CREATE INDEX IF NOT EXISTS idx_scheduler_fri_status_processing_attempts + ON scheduler_witness_jobs_fri (processing_started_at, attempts) + WHERE status IN ('in_progress', 'failed'); + +CREATE INDEX IF NOT EXISTS idx_scheduler_dependency_tracker_fri_circuit_ids_filtered + ON scheduler_dependency_tracker_fri ( + circuit_1_final_prover_job_id, + 
circuit_2_final_prover_job_id, + circuit_3_final_prover_job_id, + circuit_4_final_prover_job_id, + circuit_5_final_prover_job_id, + circuit_6_final_prover_job_id, + circuit_7_final_prover_job_id, + circuit_8_final_prover_job_id, + circuit_9_final_prover_job_id, + circuit_10_final_prover_job_id, + circuit_11_final_prover_job_id, + circuit_12_final_prover_job_id, + circuit_13_final_prover_job_id + ) WHERE status != 'queued'; + diff --git a/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.down.sql b/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.down.sql new file mode 100644 index 000000000000..c2088389416b --- /dev/null +++ b/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS proof BYTEA; +ALTER TABLE prover_jobs_fri DROP COLUMN IF EXISTS proof_blob_url; diff --git a/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.up.sql b/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.up.sql new file mode 100644 index 000000000000..8383991cfbae --- /dev/null +++ b/core/lib/dal/migrations/20230630095614_add_proof_blob_url_drop_proof_from_prover_jobs_fri.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE prover_jobs_fri DROP COLUMN IF EXISTS proof; +ALTER TABLE prover_jobs_fri ADD COLUMN IF NOT EXISTS proof_blob_url TEXT; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 7c48f297ea1c..1bd3f3ee7607 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -1,32 +1,5 @@ { "db": "PostgreSQL", - "0016b523dc81ee51f566cf5f226a2a0b53c51e7d02318d6c23a55eb92cfa7f94": { - "describe": { - "columns": [ - { - "name": "initial_write_l1_batch_number?", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "current_l1_batch_number?", - "ordinal": 1, - "type_info": "Int8" - } - ], - "nullable": [ - null, - null - ], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n SELECT (SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1) as \"initial_write_l1_batch_number?\",\n (SELECT miniblocks.l1_batch_number FROM miniblocks WHERE number = $2) as \"current_l1_batch_number?\"\n " - }, "01189407fab9be050ae75249f75b9503343500af700f00721e295871fa969172": { "describe": { "columns": [ @@ -220,253 +193,270 @@ }, "query": "UPDATE eth_txs\n SET confirmed_eth_tx_history_id = $1\n WHERE id = $2" }, - "077913dcb33f255fad3f6d81a46a5acad9074cf5c03216430ca1a959825a057a": { + "078adbe9a9973c96c8911725c2b2ce449f83897b324c434b04ffe4d1dd40484c": { "describe": { "columns": [ { - "name": "max", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "merkle_tree_paths_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "status", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "error", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 5, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "processing_started_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 8, + "type_info": "Time" + }, + { + "name": "is_blob_cleaned", + "ordinal": 9, + "type_info": "Bool" } ], "nullable": [ - null + false, + true, + false, + false, + true, + false, + 
false, + true, + true, + true ], "parameters": { - "Left": [] + "Left": [ + "Int8" + ] } }, - "query": "\n SELECT MAX(l1_batch_number) FROM witness_inputs\n WHERE merkel_tree_paths_blob_url IS NOT NULL\n " + "query": "\n UPDATE witness_inputs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs_fri\n WHERE l1_batch_number <= $1\n AND status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs_fri.*\n " }, - "07f14f401347d74b8bb3595f5ec75e6379a8af0e2e4cbd5ee78d70583925d60b": { + "0b934f7671826b45d5a6f95f30ae13f073a16bc54b1b933b52681901c676d623": { "describe": { "columns": [ { - "name": "number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "is_finished", + "name": "full_fee", "ordinal": 2, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": "Varchar" }, { - "name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": "updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", "ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + 
"name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", "ordinal": 29, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_gas_price", + "name": "miniblock_number", "ordinal": 30, "type_info": "Int8" }, { - "name": "l2_fair_gas_price", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "rollup_last_leaf_index", + "name": "refunded_gas", "ordinal": 32, "type_info": "Int8" }, { - "name": "zkporter_is_available", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 35, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 36, "type_info": "Numeric" }, { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, + "name": "l1_tx_refund_recipient", + "ordinal": 34, "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 41, - "type_info": "Int4" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ false, false, + true, + true, false, + true, + true, + true, false, false, - false, - false, - false, - true, true, true, true, @@ -474,274 +464,60 @@ true, true, true, - false, - false, true, false, false, false, - false, - false, - false, - false, - true, - true, true, + false, true, false, false, + false, true, true, true, true, - false, - true, - true, true, false, true, - false + true ], "parameters": { - "Left": [ - "Int8", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit\n FROM\n (SELECT l1_batches.*, row_number() over (order by number ASC) as row_number\n FROM l1_batches\n LEFT JOIN prover_jobs ON 
prover_jobs.l1_batch_number = l1_batches.number\n WHERE eth_commit_tx_id IS NOT NULL\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n AND l1_batches.number > $1\n ORDER BY number LIMIT $2) inn\n WHERE number - row_number = $1\n " + "query": "\n SELECT * FROM transactions\n WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL\n ORDER BY miniblock_number, index_in_block\n " }, - "0b934f7671826b45d5a6f95f30ae13f073a16bc54b1b933b52681901c676d623": { + "0c729d441aceba247e36c08a89661c35b476d4d7c73882147699009affe78472": { "describe": { "columns": [ { - "name": "hash", + "name": "l1_batch_number!", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "circuit_id", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int2" }, { - "name": "full_fee", + "name": "aggregation_round", "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, - "type_info": "Numeric" - }, - { - "name": "initiator_address", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 11, - "type_info": "Int8" - }, - { - "name": "index_in_block", - "ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 18, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 19, - "type_info": "Timestamp" - }, - { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" - }, - { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" - }, - { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" - }, - { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" - }, - { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" + "type_info": "Int2" } ], "nullable": [ + null, false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - 
false, - false, - false, - true, - false, - true, - false, - false, - false, - true, - true, - true, - true, - true, - false, - true, - true + false ], "parameters": { "Left": [] } }, - "query": "\n SELECT * FROM transactions\n WHERE miniblock_number IS NOT NULL AND l1_batch_number IS NULL\n ORDER BY miniblock_number, index_in_block\n " - }, - "0cd13b94dc52a1a5228ed7a7c673add0aaf39a8bb378b97f49f256cb233b8a63": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "DELETE FROM contract_verification_zksolc_versions" + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_id, aggregation_round\n FROM prover_jobs_fri\n WHERE status IN('queued', 'in_progress', 'failed')\n GROUP BY circuit_id, aggregation_round\n " }, "0d1bed183c38304ff1a6c8c78dca03964e2e188a6d01f98eaf0c6b24f19b8b6f": { "describe": { @@ -773,25 +549,70 @@ }, "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" }, - "0f8a603899280c015b033c4160bc064865103e9d6d63a369f07a8e5d859a7b14": { + "0e001ef507253b4fd3a87e379c8f2e63fa41250b1a396d81697de2b7ea71215e": { "describe": { "columns": [ { - "name": "timestamp", + "name": "count!", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { "Left": [ - "Int8" - ] - } - }, - "query": "SELECT timestamp FROM miniblocks WHERE number = $1" + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Bytea" + ] + } + }, + "query": "SELECT COUNT(*) as \"count!\" FROM l1_batches WHERE number = $1 AND hash = $2 AND merkle_root_hash = $3 AND parent_hash = $4 AND l2_l1_merkle_root = $5" + }, + "0f5897b5e0109535caa3d49f899c65e5080511d49305558b59b185c34227aa18": { + "describe": { + "columns": [ + { + "name": "nonce!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + } + }, + "query": "SELECT nonce as \"nonce!\" FROM transactions WHERE initiator_address = $1 AND nonce >= $2 AND is_priority = FALSE AND (miniblock_number IS NOT NULL OR error IS NULL) ORDER BY nonce" + }, + "0f8a603899280c015b033c4160bc064865103e9d6d63a369f07a8e5d859a7b14": { + "describe": { + "columns": [ + { + "name": "timestamp", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT timestamp FROM miniblocks WHERE number = $1" }, "0fd885074c624bea478ec0a24a499cf1278773cdba92550439da5d3b70cbf38c": { "describe": { @@ -837,6 +658,134 @@ }, "query": "SELECT tx_hash FROM eth_txs_history\n WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL" }, + "142c812f70d8c0cef986bef9b3c058e148f2cfb1c2c933ff321cf498b9c6e3b2": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "last_batch_miniblock?", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 6, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 8, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 10, + "type_info": "Timestamp" + 
}, + { + "name": "l1_gas_price", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 13, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 14, + "type_info": "Bytea" + }, + { + "name": "fee_account_address?", + "ordinal": 15, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + null, + false, + false, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + }, + "14815f61d37d274f9aea1125ca4d368fd8c45098b0017710c0ee18d23d994c15": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" + }, "151aa7cab859c275f74f981ed146415e1e5242ebe259552d5b9fac333c0d9ce8": { "describe": { "columns": [], @@ -881,6 +830,33 @@ }, "query": "DELETE FROM storage_logs WHERE miniblock_number > $1" }, + "1948ab14bafbb3ba0098563f22d958c9383877788980fe51bd217987898b1c92": { + "describe": { + "columns": [ + { + "name": "hashed_key!", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "value?", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + null, + null + ], + "parameters": { + "Left": [ + "ByteaArray", + "Int8" + ] + } + }, + "query": "SELECT u.hashed_key as \"hashed_key!\", (SELECT value FROM storage_logs WHERE hashed_key = u.hashed_key AND miniblock_number <= $2 ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\" FROM UNNEST($1::bytea[]) AS u(hashed_key)" + }, "19b89495be8aa735db039ccc8a262786c58e54f132588c48f07d9537cf21d3ed": { "describe": { "columns": [ @@ -1111,31 +1087,18 @@ }, "query": "\n SELECT l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n 
commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n l1_batches.l1_gas_price,\n l1_batches.l2_fair_gas_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE l1_batches.number = $1\n " }, - "1d26bb777f103d83523d223071eaa8391049c0efec9406e37003ac08065d389f": { + "1c583696808f93ff009ddf5df0ea36fe2621827fbd425c39ed4c9670ebc6431b": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bool", - "Bytea", - "Int8", - "Bytea", - "Bytea", - "Bytea", + "Text", "Int8" ] } }, - "query": "\n UPDATE l1_batches\n SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4,\n compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7,\n l2_l1_merkle_root = $8,\n zkporter_is_available = $9, bootloader_code_hash = $10, rollup_last_leaf_index = $11,\n aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14,\n updated_at = now()\n WHERE number = $15\n " + "query": "\n UPDATE witness_inputs_fri SET status =$1, updated_at = now()\n WHERE l1_batch_number = $2\n " }, "1d3e9cd259fb70a2bc81e8344576c3fb27b47ad6cdb6751d2a9b8c8d342b7a75": { "describe": { @@ -1150,65 +1113,131 @@ }, "query": "\n UPDATE prover_jobs\n SET status = $1, updated_at = now()\n WHERE id = $2\n " }, - "1eede5c2169aee5a767b3b6b829f53721c0c353956ccec31a75226a65325ae46": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "UPDATE transactions SET in_mempool = FALSE WHERE in_mempool = TRUE" - }, - "1f3e41f4ac5b1f6e735f1c422c0098ed534d9e8fe84e98b3234e893e8a2c5085": { + "1dbe99ed32b361936c2a829a99a92ac792a02c8a304d23b140804844a7b0f857": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 1, + "type_info": "Int2" + }, + { + "name": "depth", + "ordinal": 2, "type_info": "Int4" } ], "nullable": [ + false, + false, false ], "parameters": { - "Left": [ - "Text" - ] + "Left": [] } }, - "query": "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs\n ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id\n WHERE eth_txs_history.tx_hash = $1" + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id, depth) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth\n FROM prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY prover_jobs_fri.l1_batch_number, 
prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs\n HAVING COUNT(*) = nawj.number_of_dependent_jobs)\n RETURNING l1_batch_number, circuit_id, depth;\n " }, - "1faf6552c221c75b7232b55210c0c37be76a57ec9dc94584b6ccb562e8b182f2": { + "1e68306cbd83eb6b5de59fb8f638c8f3252732b9074e2455f7b5aedf6fdc886f": { "describe": { "columns": [ { - "name": "id", + "name": "count!", "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", + "name": "circuit_id!", "ordinal": 1, - "type_info": "Int8" + "type_info": "Int2" }, { - "name": "circuit_type", + "name": "aggregation_round!", "ordinal": 2, - "type_info": "Text" + "type_info": "Int2" }, { - "name": "prover_input", + "name": "status!", "ordinal": 3, - "type_info": "Bytea" - }, - { - "name": "status", - "ordinal": 4, "type_info": "Text" - }, - { + } + ], + "nullable": [ + null, + false, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\", circuit_id as \"circuit_id!\", aggregation_round as \"aggregation_round!\", status as \"status!\"\n FROM prover_jobs_fri\n GROUP BY circuit_id, aggregation_round, status\n " + }, + "1eede5c2169aee5a767b3b6b829f53721c0c353956ccec31a75226a65325ae46": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "UPDATE transactions SET in_mempool = FALSE WHERE in_mempool = TRUE" + }, + "1f3e41f4ac5b1f6e735f1c422c0098ed534d9e8fe84e98b3234e893e8a2c5085": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "SELECT eth_txs.id FROM eth_txs_history JOIN eth_txs\n ON eth_txs.confirmed_eth_tx_history_id = eth_txs_history.id\n WHERE eth_txs_history.tx_hash = $1" + }, + "1faf6552c221c75b7232b55210c0c37be76a57ec9dc94584b6ccb562e8b182f2": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_type", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "prover_input", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { "name": "error", "ordinal": 5, "type_info": "Text" @@ -1410,6 +1439,19 @@ }, "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, + "21504ecf55757d6b487f8c21fa72821109c70736185f616be7e180d1b31ca9a1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "\n UPDATE transactions\n SET effective_gas_price = 0\n WHERE miniblock_number BETWEEN $1 AND $2\n AND is_priority = TRUE\n " + }, "227daa1e8d647c207869d7c306d9d13a38c6baf07281cf72cd93d20da2e3cf3c": { "describe": { "columns": [ @@ -1449,78 +1491,7 @@ }, "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true AND miniblock_number IS NOT NULL" }, - "230cbdfecc31d22f490d98e52dacd69739b654491042dc32a0f5e672281822f7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "update storage set value = $1 where hashed_key = $2" - }, - 
"2397c1a050d358b596c9881c379bf823e267c03172f72c42da84cc0c04cc9d93": { - "describe": { - "columns": [ - { - "name": "miniblock_number!", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "index_in_block!", - "ordinal": 2, - "type_info": "Int4" - }, - { - "name": "l1_batch_tx_index!", - "ordinal": 3, - "type_info": "Int4" - } - ], - "nullable": [ - true, - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " - }, - "249d8c0334a8a1a4ff993f72f5245dc55c60773732bfe7596dc5f05f34c15131": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Text" - ] - } - }, - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ('\\x00', 0, $1, '', 0, now(), now())\n RETURNING id" - }, - "24abd3109457403cbb8dc59f8805e0426d6da3b766ddae1516d45ad0b1277bc7": { + "230ad5f76b258a756e91732857db772b1f241066278fefc742122f4d1830f56e": { "describe": { "columns": [ { @@ -1784,13 +1755,82 @@ false, false ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT * FROM l1_batches ORDER BY number DESC LIMIT 1" + }, + "2397c1a050d358b596c9881c379bf823e267c03172f72c42da84cc0c04cc9d93": { + "describe": { + "columns": [ + { + "name": "miniblock_number!", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "index_in_block!", + "ordinal": 2, + "type_info": "Int4" + }, + { + "name": "l1_batch_tx_index!", + "ordinal": 3, + "type_info": "Int4" + } + ], + "nullable": [ + true, + false, + true, + true + ], "parameters": { "Left": [ - "Int4" + "Int8" + ] + } + }, + "query": "\n SELECT miniblock_number as \"miniblock_number!\",\n hash, index_in_block as \"index_in_block!\", l1_batch_tx_index as \"l1_batch_tx_index!\"\n FROM transactions\n WHERE l1_batch_number = $1\n ORDER BY miniblock_number, index_in_block\n " + }, + "2424f0ab2b156e953841107cfc0ccd76519d13c62fdcd5fd6b39e3503d6ec82c": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE l1_batch_number = $2\n " + }, + "249d8c0334a8a1a4ff993f72f5245dc55c60773732bfe7596dc5f05f34c15131": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Text" ] } }, - "query": "SELECT * FROM l1_batches\n WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1" + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ('\\x00', 0, $1, '', 0, now(), now())\n RETURNING id" }, "251d3e3615046ec5f061cfba65dc5ad891ee7fa315abe39aedbd291e36140610": { "describe": { @@ -1900,23 +1940,23 @@ }, "query": "\n SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND updated_at < NOW() - INTERVAL '30 days'\n AND scheduler_witness_blob_url is NOT 
NULL\n AND final_node_aggregations_blob_url is NOT NULL\n LIMIT $1;\n " }, - "2911797974d340cc75bb628866c24f77665e3dca3954f0c83860da488265f5c6": { + "269f3ac58705d65f775a6c84a62b9c0726beef51eb633937fa2a75b80c6d7fbc": { "describe": { "columns": [ { - "name": "address", + "name": "hash", "ordinal": 0, "type_info": "Bytea" }, { - "name": "key", + "name": "number", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "value", + "name": "timestamp", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ @@ -1930,45 +1970,45 @@ ] } }, - "query": "\n SELECT address, key, value\n FROM storage_logs\n WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1)\n AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1)\n ORDER BY miniblock_number, operation_number\n " - }, - "2b07fff3b8f793c010c0bd6f706d7c43786305e3335fd6ae344664ec60f815a8": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [] - } - }, - "query": "DELETE FROM contract_verification_solc_versions" + "query": "SELECT hash, number, timestamp FROM miniblocks WHERE number > $1 ORDER BY number ASC" }, - "2b22e7d15adf069c8e68954059b83f71a71350f3325b4280840c4be7e54a319f": { + "2928cd054e9d6898559f964906a2ee0d3750fbe6fbd99209a48fc7b197fa2a22": { "describe": { "columns": [ { - "name": "l1_address", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "l2_address", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "name", + "name": "circuit_id", "ordinal": 2, - "type_info": "Varchar" + "type_info": "Int2" }, { - "name": "symbol", + "name": "aggregation_round", "ordinal": 3, - "type_info": "Varchar" + "type_info": "Int2" }, { - "name": "decimals", + "name": "sequence_number", "ordinal": 4, "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" } ], "nullable": [ @@ -1976,84 +2016,133 @@ false, false, false, + false, + false, false ], "parameters": { - "Left": [] + "Left": [ + "Int2Array", + "Int2Array" + ] } }, - "query": "SELECT l1_address, l2_address, name, symbol, decimals FROM tokens\n WHERE well_known = true\n ORDER BY symbol" + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n AND (circuit_id, aggregation_round) IN (\n SELECT * FROM UNNEST($1::smallint[], $2::smallint[])\n )\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, - "2c136284610f728ddba3e255d7dc573b10e4baf9151de194b7d8e0dc40c40602": { + "2985ea2bf34a94573103654c00a49d2a946afe5d552ac1c2a2d055eb9d6f2cf1": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Jsonb" + "Time", + "Int8" ] } }, - "query": "INSERT INTO transaction_traces (tx_hash, trace, created_at, updated_at) VALUES ($1, $2, now(), now())" + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " }, - 
"2eea5d279edc2b23cab00d2be00d046f741552e5d86dfdf61d7e3847a4bb65d8": { + "2adfdba6fa2b6b967ba03ae6f930e7f3ea851f678d30df699ced27b2dbb01c2a": { "describe": { "columns": [ { - "name": "count!", + "name": "number", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - null + false ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contracts_verification_info\n WHERE address = $1\n " + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) WHERE execute_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, - "2f5f9182c87944bf7856ee8e6036e49118477c62d3085c4bab32150f268dfa58": { + "2b22e7d15adf069c8e68954059b83f71a71350f3325b4280840c4be7e54a319f": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "l1_address", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "l2_address", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "name", + "ordinal": 2, + "type_info": "Varchar" + }, + { + "name": "symbol", + "ordinal": 3, + "type_info": "Varchar" + }, + { + "name": "decimals", + "ordinal": 4, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false, + false, + false, + false + ], "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bool", - "Bytea", - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Int8" - ] + "Left": [] } }, - "query": "\n UPDATE l1_batches SET\n hash = $1, merkle_root_hash = $2, commitment = $3, \n compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6,\n l2_l1_merkle_root = $7, zkporter_is_available = $8, \n parent_hash = $9, rollup_last_leaf_index = $10, \n aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13,\n updated_at = NOW()\n WHERE number = $14 AND hash IS NULL\n " + "query": "SELECT l1_address, l2_address, name, symbol, decimals FROM tokens\n WHERE well_known = true\n ORDER BY symbol" }, - "2ff4a13a75537cc30b2c3d52d3ef6237850150e4a4569adeaa4da4a9ac5bc689": { + "2b76ca7059810f691a2d7d053e7e62e06de13e7ddb7747e39335bb10c45534e9": { "describe": { "columns": [ { - "name": "bytecode", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 1, + "type_info": "Int2" } ], "nullable": [ + false, false ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id\n FROM prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON\n prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number, circuit_id;\n " + }, + "2c136284610f728ddba3e255d7dc573b10e4baf9151de194b7d8e0dc40c40602": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Jsonb" + ] + } + }, + "query": "INSERT INTO transaction_traces (tx_hash, trace, created_at, updated_at) VALUES ($1, $2, now(), now())" + }, + 
"2e543dc0013150040bb86e278bbe86765ce1ebad72a32bb931fe02a9c516a11c": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ "Bytea", @@ -2061,51 +2150,48 @@ ] } }, - "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2" + "query": "UPDATE l1_batches SET hash = $1 WHERE number = $2" }, - "3221b722354995f0705ceaf913a48aa092129bb4ff561a1104196f5b25192576": { + "2eea5d279edc2b23cab00d2be00d046f741552e5d86dfdf61d7e3847a4bb65d8": { "describe": { "columns": [ { - "name": "version", + "name": "count!", "ordinal": 0, - "type_info": "Text" + "type_info": "Int8" } ], "nullable": [ - false + null ], "parameters": { - "Left": [] + "Left": [ + "Bytea" + ] } }, - "query": "SELECT version FROM contract_verification_zksolc_versions ORDER by version" + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contracts_verification_info\n WHERE address = $1\n " }, - "32236a83e1525748f736fa87d53df6005e49f21968e90af9d933359fdd3fb330": { + "2ff4a13a75537cc30b2c3d52d3ef6237850150e4a4569adeaa4da4a9ac5bc689": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "bytecode", "ordinal": 0, "type_info": "Bytea" - }, - { - "name": "call_trace", - "ordinal": 1, - "type_info": "Bytea" } ], "nullable": [ - false, false ], "parameters": { "Left": [ + "Bytea", "Int8" ] } }, - "query": "\n SELECT * FROM call_traces WHERE tx_hash IN (\n SELECT hash FROM transactions WHERE miniblock_number = $1\n )\n " + "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2" }, "335826f54feadf6aa30a4e7668ad3f17a2afc6bd67d4f863e3ad61fefd1bd8d2": { "describe": { @@ -2125,6 +2211,78 @@ }, "query": "SELECT MAX(number) as \"number\" FROM miniblocks" }, + "3418353764615faa995ff518579ff2f28b79f60d0421cb4d209f62a0abbf06cf": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash FROM miniblocks ORDER BY number DESC LIMIT 1" + }, "36c483775b604324eacd7e5aac591b927cc32abb89fe1b0c5cf4b0383e9bd443": { "describe": { "columns": [ @@ -2249,208 +2407,11 @@ }, "query": "DELETE FROM call_traces\n WHERE tx_hash = ANY($1)" }, - "3b0bfc7445faaa87f6cabb68419ebff995120d65db3a4def70d998507e699811": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "ByteaArray", - "Int4Array", - "VarcharArray", - "JsonbArray", - "Int8Array" - ] - } - }, - "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n 
in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::bigint[]) as refunded_gas\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " - }, - "3c582aeed32235ef175707de412a9f9129fad6ea5e87ebb85f68e20664b0da46": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4Array", - "ByteaArray", - "Int8" - ] - } - }, - "query": "\n UPDATE transactions\n SET \n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = now()\n FROM\n (SELECT\n UNNEST($1::int[]) AS l1_batch_tx_index,\n UNNEST($2::bytea[]) AS hash\n ) AS data_table\n WHERE transactions.hash=data_table.hash \n " - }, - "3cb9fd0e023940d4e30032a9b0528a95513468ebf701557153c5f1417bdb847f": { + "3bc54eb6ad9c5b7810954f2dfd7c49ff0d4f2bc5c020b04448db6b5883439a2d": { "describe": { "columns": [ { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Text", - "Text", - "Text", - "Text", - "Bool", - "Bytea", - "Bool" - ] - } - }, - "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n compiler_zksolc_version,\n compiler_solc_version,\n optimization_used,\n constructor_arguments,\n is_system,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', now(), now())\n RETURNING id\n " - }, - "3d41f05e1d5c5a74e0605e66fe08e09f14b8bf0269e5dcde518aa08db92a3ea0": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM events WHERE miniblock_number > $1" - }, - "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Timestamp" - ] - } - }, - "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" - }, - "3f86b7cb793dd8849af45ff3de4eabb80082a1cf8b213be607e6e13bb3d6710d": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "basic_circuits", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "basic_circuits_inputs", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "number_of_basic_circuits", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "processing_started_at", - "ordinal": 5, - "type_info": "Timestamp" - }, - { - "name": "time_taken", - "ordinal": 6, - "type_info": "Time" - }, - { - "name": "error", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "created_at", - "ordinal": 8, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "attempts", - "ordinal": 10, - "type_info": "Int4" - }, - { - "name": "basic_circuits_blob_url", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "basic_circuits_inputs_blob_url", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - "ordinal": 13, - "type_info": "Bool" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - true, - true, - true, - false, - false, - 
false, - true, - true, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int4", - "Int8" - ] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " - }, - "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - } - }, - "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1 AND status != 'queued'\n " - }, - "41913b02b13a0dad87268c5e0d673d9f04d5207ab6a48b63004e6c3ed07b93bc": { - "describe": { - "columns": [ - { - "name": "number", + "name": "number", "ordinal": 0, "type_info": "Int8" }, @@ -2710,287 +2671,256 @@ false, false ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT * FROM l1_batches WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL ORDER BY number DESC LIMIT 1" - }, - "42d2c16694dbf70205748008a18424bcbb689aff8317079dc6d60c411541167d": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Int8", + "Bytea", + "Bytea", "Int8" ] } }, - "query": "\n UPDATE l1_batches\n SET predicted_commit_gas_cost = $2, updated_at = now()\n WHERE number = $1\n " - }, - "433d5da4d72150cf2c1e1007ee3ff51edfa51924f4b662b8cf382f06e60fd228": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Text", - "Text" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " + "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number != 0 AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 AND commitment IS NOT NULL ORDER BY number LIMIT $3" }, - "438ea2edcf2e5ec1ec8b05da4d634e914e4d892441b6f2926f0926c7c90e33d1": { + "3c582aeed32235ef175707de412a9f9129fad6ea5e87ebb85f68e20664b0da46": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Text", - "Jsonb" - ] - } - }, - "query": "INSERT INTO contract_sources (address, assembly_code, pc_line_mapping, created_at, updated_at)\n VALUES ($1, $2, $3, now(), now())\n ON CONFLICT (address)\n DO UPDATE SET assembly_code = $2, pc_line_mapping = $3, updated_at = now()\n " - }, - "43f48f445f7e1627123e04b56c1899d1eee17c44411f3cbc59a809e3b16c158c": { - "describe": { - "columns": [ - { - "name": "hashed_key", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ + "Int4Array", + "ByteaArray", "Int8" ] } }, - "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM\n (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" + "query": "\n UPDATE transactions\n SET \n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = now()\n FROM\n (SELECT\n 
UNNEST($1::int[]) AS l1_batch_tx_index,\n UNNEST($2::bytea[]) AS hash\n ) AS data_table\n WHERE transactions.hash=data_table.hash \n " }, - "474c72dc36171ee1983e0eb4272cdbc180e3773093280556e8e5229b68bc793d": { + "3ccd4d053bb664a40c3887ef4f87fe2d4aa8be36e6c84c5f1d358ce712072082": { "describe": { "columns": [ { - "name": "hash", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "is_priority", + "name": "timestamp", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "full_fee", + "name": "is_finished", "ordinal": 2, - "type_info": "Numeric" + "type_info": "Bool" }, { - "name": "layer_2_tip_fee", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "initiator_address", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "nonce", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "signature", + "name": "bloom", "ordinal": 6, "type_info": "Bytea" }, { - "name": "input", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Bytea" + "type_info": "ByteaArray" }, { - "name": "data", + "name": "hash", "ordinal": 8, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "received_at", + "name": "parent_hash", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "priority_op_id", + "name": "commitment", "ordinal": 10, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_batch_number", + "name": "compressed_write_logs", "ordinal": 11, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "index_in_block", + "name": "compressed_contracts", "ordinal": 12, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "error", + "name": "eth_prove_tx_id", "ordinal": 13, - "type_info": "Varchar" + "type_info": "Int4" }, { - "name": "gas_limit", + "name": "eth_commit_tx_id", "ordinal": 14, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "gas_per_storage_limit", + "name": "eth_execute_tx_id", "ordinal": 15, - "type_info": "Numeric" + "type_info": "Int4" }, { - "name": "gas_per_pubdata_limit", + "name": "created_at", "ordinal": 16, - "type_info": "Numeric" + "type_info": "Timestamp" }, { - "name": "tx_format", + "name": "updated_at", "ordinal": 17, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "created_at", + "name": "merkle_root_hash", "ordinal": 18, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "l2_to_l1_logs", "ordinal": 19, - "type_info": "Timestamp" + "type_info": "ByteaArray" }, { - "name": "execution_info", + "name": "l2_to_l1_messages", "ordinal": 20, - "type_info": "Jsonb" + "type_info": "ByteaArray" }, { - "name": "contract_address", + "name": "predicted_commit_gas_cost", "ordinal": 21, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "in_mempool", + "name": "predicted_prove_gas_cost", "ordinal": 22, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "l1_block_number", + "name": "predicted_execute_gas_cost", "ordinal": 23, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "value", + "name": "initial_bootloader_heap_content", "ordinal": 24, - "type_info": "Numeric" + "type_info": "Jsonb" }, { - "name": "paymaster", + "name": "used_contract_hashes", "ordinal": 25, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "paymaster_input", + "name": "compressed_initial_writes", "ordinal": 26, "type_info": "Bytea" }, { - 
"name": "max_fee_per_gas", + "name": "compressed_repeated_writes", "ordinal": 27, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "max_priority_fee_per_gas", + "name": "l2_l1_compressed_messages", "ordinal": 28, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "effective_gas_price", + "name": "l2_l1_merkle_root", "ordinal": 29, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "miniblock_number", + "name": "gas_per_pubdata_byte_in_block", "ordinal": 30, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "l1_batch_tx_index", + "name": "rollup_last_leaf_index", "ordinal": 31, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "refunded_gas", + "name": "zkporter_is_available", "ordinal": 32, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "l1_tx_mint", + "name": "bootloader_code_hash", "ordinal": 33, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "l1_tx_refund_recipient", + "name": "default_aa_code_hash", "ordinal": 34, "type_info": "Bytea" }, { - "name": "block_hash?", + "name": "base_fee_per_gas", "ordinal": 35, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "miniblock_timestamp?", + "name": "gas_per_pubdata_limit", "ordinal": 36, "type_info": "Int8" }, { - "name": "eth_commit_tx_hash?", + "name": "aux_data_hash", "ordinal": 37, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "eth_prove_tx_hash?", + "name": "pass_through_data_hash", "ordinal": 38, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "eth_execute_tx_hash?", + "name": "meta_parameters_hash", "ordinal": 39, - "type_info": "Text" + "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" } ], "nullable": [ false, false, - true, - true, false, - true, - true, - true, + false, + false, + false, false, false, true, @@ -3003,10 +2933,11 @@ true, false, false, - false, true, false, - true, + false, + false, + false, false, false, false, @@ -3015,109 +2946,151 @@ true, true, true, - false, + true, + true, true, true, false, false, + true, + true, + true, false, false, false ], "parameters": { "Left": [ - "Bytea" + "Int8", + "Int8", + "Int8" ] } }, - "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n miniblocks.timestamp as \"miniblock_timestamp?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " + "query": "SELECT * FROM l1_batches WHERE number BETWEEN $1 AND $2 ORDER BY number LIMIT $3" + }, + "3d41f05e1d5c5a74e0605e66fe08e09f14b8bf0269e5dcde518aa08db92a3ea0": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM events WHERE miniblock_number 
> $1" + }, + "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Numeric", + "Timestamp" + ] + } + }, + "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" + }, + "3f6332706376ef4cadda96498872429b6ed28eca5402b03b1aa3b77b8262bccd": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text" + ] + } + }, + "query": "DELETE FROM compiler_versions WHERE compiler = $1" + }, + "3f671298a05f3f69a8ffb2e36d5ae79c544145fc1c289dd9e0c060dca3ec6e21": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray" + ] + } + }, + "query": "UPDATE storage SET value = u.value FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) WHERE u.key = hashed_key" }, - "4a8a5df72c08e9a3423e93be72dd63c38daefd644977685384327689892e68cd": { + "3f86b7cb793dd8849af45ff3de4eabb80082a1cf8b213be607e6e13bb3d6710d": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "contract_address", + "name": "basic_circuits", "ordinal": 1, "type_info": "Bytea" }, { - "name": "source_code", + "name": "basic_circuits_inputs", "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "contract_name", + "name": "number_of_basic_circuits", "ordinal": 3, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "compiler_zksolc_version", + "name": "status", "ordinal": 4, "type_info": "Text" }, { - "name": "optimization_used", + "name": "processing_started_at", "ordinal": 5, - "type_info": "Bool" + "type_info": "Timestamp" }, { - "name": "constructor_arguments", + "name": "time_taken", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Time" }, { - "name": "status", + "name": "error", "ordinal": 7, "type_info": "Text" }, { - "name": "error", + "name": "created_at", "ordinal": 8, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "created_at", + "name": "updated_at", "ordinal": 9, "type_info": "Timestamp" }, { - "name": "updated_at", + "name": "attempts", "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "compilation_errors", + "name": "basic_circuits_blob_url", "ordinal": 11, - "type_info": "Jsonb" - }, - { - "name": "processing_started_at", - "ordinal": 12, - "type_info": "Timestamp" - }, - { - "name": "compiler_solc_version", - "ordinal": 13, "type_info": "Text" }, { - "name": "attempts", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "panic_message", - "ordinal": 15, + "name": "basic_circuits_inputs_blob_url", + "ordinal": 12, "type_info": "Text" }, { - "name": "is_system", - "ordinal": 16, + "name": "is_blob_cleaned", + "ordinal": 13, "type_info": "Bool" } ], @@ -3127,464 +3100,161 @@ false, false, false, - false, - false, - false, true, - false, - false, true, true, false, false, + false, + true, true, false ], "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int4", + "Int8" + ] } }, - "query": "SELECT * FROM contract_verification_requests\n WHERE status = 'successful'\n ORDER BY id" + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM leaf_aggregation_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND 
processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs.*\n " }, - "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { + "40a86f39a74ab22bdcd8b40446ea063c68bfb3e930e3150212474a657e82b38f": { "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Interval" + "Int8", + "Text" ] } }, - "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" + "query": "\n UPDATE scheduler_witness_jobs\n SET final_node_aggregations_blob_url = $2,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $1 AND status != 'queued'\n " }, - "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { + "41913b02b13a0dad87268c5e0d673d9f04d5207ab6a48b63004e6c3ed07b93bc": { "describe": { "columns": [ { - "name": "hashed_key", + "name": "number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "value!", + "name": "timestamp", "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", + "ordinal": 2, + "type_info": "Bool" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "fee_account_address", + "ordinal": 5, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" - }, - "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { - "describe": { - "columns": [ + }, { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" - }, - "4bab972cbbd8b53237a840ba9307079705bd4b5270428d2b41f05ee3d2aa42af": { - "describe": { - "columns": [ + "name": "bloom", + "ordinal": 6, + "type_info": "Bytea" + }, { - "name": "l1_batch_number!", - "ordinal": 0, - "type_info": "Int8" + "name": "priority_ops_onchain_data", + "ordinal": 7, + "type_info": "ByteaArray" }, { - "name": "circuit_type", - "ordinal": 1, - "type_info": "Text" - } - ], - "nullable": [ - null, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_type\n FROM prover_jobs\n WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR status = 'failed')\n GROUP BY circuit_type\n " - }, - "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" - }, - "4ca0356959e4cc50e09b6fe08e9d45cbd929601935506acbbade4a42c2eaea89": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Text" - ] - } - }, - "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), 
now())\n " - }, - "4d7b5a423b29ce07bd12f168d1ee707e6e413d9a4f0daafb4beed102d22d1745": { - "describe": { - "columns": [ + "name": "hash", + "ordinal": 8, + "type_info": "Bytea" + }, { - "name": "address", - "ordinal": 0, + "name": "parent_hash", + "ordinal": 9, "type_info": "Bytea" }, { - "name": "key", - "ordinal": 1, + "name": "commitment", + "ordinal": 10, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT address, key FROM protective_reads\n WHERE l1_batch_number = $1\n " - }, - "4dc63a4431062cb1ae428db625251a6121c3aa2fc06e045ae07b3db6d2f66406": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE witness_inputs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "4e2b733fea9ca7cef542602fcd80acf1a9d2e0f1e22566f1076c4837e3ac7e61": { - "describe": { - "columns": [ + }, { - "name": "id", - "ordinal": 0, - "type_info": "Int8" + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" }, { - "name": "instance_host", - "ordinal": 1, - "type_info": "Inet" + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" }, { - "name": "instance_port", - "ordinal": 2, + "name": "eth_prove_tx_id", + "ordinal": 13, "type_info": "Int4" }, { - "name": "instance_status", - "ordinal": 3, - "type_info": "Text" + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" }, { "name": "created_at", - "ordinal": 4, + "ordinal": 16, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 5, + "ordinal": 17, "type_info": "Timestamp" }, { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" }, { - "name": "queue_free_slots", - "ordinal": 7, - "type_info": "Int4" + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" }, { - "name": "queue_capacity", - "ordinal": 8, - "type_info": "Int4" + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" }, { - "name": "specialized_prover_group_id", - "ordinal": 9, - "type_info": "Int2" + "name": "predicted_commit_gas_cost", + "ordinal": 21, + "type_info": "Int8" }, { - "name": "region", - "ordinal": 10, - "type_info": "Text" + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" }, { - "name": "zone", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "num_gpu", - "ordinal": 12, - "type_info": "Int2" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - false, - false, - true - ], - "parameters": { - "Left": [ - "Interval", - "Int2", - "Text", - "Text" - ] - } - }, - "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " - }, - "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int4Array", - "ByteaArray", - 
"ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "Int4Array", - "Int4Array", - "VarcharArray", - "NumericArray", - "JsonbArray", - "ByteaArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "Int8" - ] - } - }, - "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " - }, - "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" - }, - "50f406ffe7802e753411baa0e348294bdb05c96b96b2041ee876e2b34a1a6ea6": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM l1_batches\n WHERE number = $1\n AND hash = $2\n AND merkle_root_hash = $3\n AND parent_hash = $4\n AND l2_l1_merkle_root = $5\n " - }, - "516e309a97010cd1eb8398b2b7ff809786703c075e4c3dff1133c41cdcfdd3f3": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "fee_account_address", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "bloom", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "priority_ops_onchain_data", - "ordinal": 7, - "type_info": "ByteaArray" - }, 
- { - "name": "hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, - "type_info": "Int4" - }, - { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" }, { "name": "initial_bootloader_heap_content", @@ -3727,350 +3397,254 @@ false, false ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT * FROM l1_batches WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL ORDER BY number DESC LIMIT 1" + }, + "433d5da4d72150cf2c1e1007ee3ff51edfa51924f4b662b8cf382f06e60fd228": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int4", + "Int8", + "Text", + "Text" ] } }, - "query": "SELECT * FROM l1_batches WHERE number = $1" + "query": "\n UPDATE node_aggregation_witness_jobs\n SET number_of_leaf_circuits = $1,\n leaf_layer_subqueues_blob_url = $3,\n aggregation_outputs_blob_url = $4,\n status = 'waiting_for_proofs',\n updated_at = now()\n WHERE l1_batch_number = $2 AND status != 'queued'\n " + }, + "448d283cab6ae334de9676f69416974656d11563b58e0188d53ca9e0995dd287": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queued'\n WHERE l1_batch_number = ANY($1)\n " }, - "52602518095b2a45fadab7b76218acb6964b416a103be2a3b37b3dac4a970c14": { + "474c72dc36171ee1983e0eb4272cdbc180e3773093280556e8e5229b68bc793d": { "describe": { "columns": [ { - "name": "number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "hash", + "name": "full_fee", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "base_fee_per_gas", - "ordinal": 5, - "type_info": "Numeric" - }, - { - "name": "l1_gas_price", - "ordinal": 6, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 7, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 9, - "type_info": 
"Bytea" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - true - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n WHERE number = $1\n " - }, - "541d22a9ffe9c7b31833f203af0820cca4513d7a9e6feed7313757674c30e667": { - "describe": { - "columns": [ - { - "name": "address", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "key", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "value", - "ordinal": 2, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false, - false - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "\n SELECT address, key, value FROM storage_logs\n WHERE miniblock_number >= $1 AND miniblock_number <= $2\n ORDER BY miniblock_number, operation_number ASC\n " - }, - "5543380548ce40063d43c1d54e368c7d385800d7ade9e720306808cc4c376978": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "is_finished", - "ordinal": 2, - "type_info": "Bool" - }, - { - "name": "l1_tx_count", - "ordinal": 3, - "type_info": "Int4" - }, - { - "name": "l2_tx_count", - "ordinal": 4, - "type_info": "Int4" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": "Varchar" }, { - "name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": "updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", 
"ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + "name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", "ordinal": 29, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "miniblock_number", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "zkporter_is_available", + "name": "refunded_gas", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "default_aa_code_hash", + "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "block_hash?", "ordinal": 35, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "miniblock_timestamp?", "ordinal": 36, "type_info": "Int8" }, { - "name": "aux_data_hash", + "name": "eth_commit_tx_hash?", "ordinal": 37, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "pass_through_data_hash", + "name": "eth_prove_tx_hash?", "ordinal": 38, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "meta_parameters_hash", + "name": "eth_execute_tx_hash?", "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "type_info": "Text" } ], "nullable": [ false, false, + true, + true, false, + true, + true, + true, false, false, - false, - false, - false, - true, true, true, true, @@ -4078,13 +3652,13 @@ true, true, true, - false, - false, true, false, false, false, + true, false, + true, false, false, false, @@ -4093,334 +3667,228 @@ true, true, true, - true, - true, + false, true, true, false, false, - true, - true, - true, false, false, false ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT * FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" - }, - "55ae3cf154fe027f9036c60d21b5fd32972fbb2b17a74562d7721ec69dd19971": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ "Bytea" ] } }, - "query": "delete from storage where hashed_key = $1" + "query": "\n SELECT transactions.*, miniblocks.hash as \"block_hash?\",\n miniblocks.timestamp as \"miniblock_timestamp?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM 
transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, - "55debba852ef32f3b5ba6ffcb745f7b59d6888a21cb8792f8f9027e3b164a245": { + "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { "columns": [ { - "name": "region", + "name": "hash", "ordinal": 0, - "type_info": "Text" - }, - { - "name": "zone", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "total_gpus", - "ordinal": 2, - "type_info": "Int8" + "type_info": "Bytea" } ], "nullable": [ - false, - false, - null + false ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " - }, - "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { - "describe": { - "columns": [], - "nullable": [], "parameters": { "Left": [ - "Int4", - "Int8", - "Int8" + "Interval" ] } }, - "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "DELETE FROM transactions WHERE miniblock_number IS NULL AND received_at < now() - $1::interval AND is_priority=false AND error IS NULL RETURNING hash" }, - "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { + "4ac212a08324b9d4c3febc585109f19105b4d20aa3e290352e3c63d7ec58c5b2": { "describe": { "columns": [ { - "name": "count!", + "name": "l2_address", "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "circuit_type!", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "status!", - "ordinal": 2, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ - null, - false, false ], "parameters": { "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " - }, - "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - } - }, - "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" + "query": "SELECT l2_address FROM tokens" }, - "59b10abd699d19cbdf285334162ee40f294c5fad8f99fc00a4cdb3b233a494d6": { + "4ac92a8436108097a32e94e53f7fe99261c7c3a40dbc433c20ccea3a7d06650c": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "hashed_key", "ordinal": 0, "type_info": "Bytea" }, - { - "name": "topic2!", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "topic3!", - "ordinal": 2, - "type_info": "Bytea" - }, { "name": "value!", - "ordinal": 3, - "type_info": "Bytea" - }, - { - "name": "l1_address!", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "l2_address!", - "ordinal": 5, + "ordinal": 1, "type_info": "Bytea" - }, - { - "name": "symbol!", - "ordinal": 6, - "type_info": "Varchar" - }, - { - "name": "name!", - "ordinal": 7, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": 
"usd_price?", - "ordinal": 9, - "type_info": "Numeric" } ], "nullable": [ false, - false, - false, - false, - false, - false, - false, - false, - false, - true + false ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "ByteaArray" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT hashed_key, value as \"value!\" FROM storage WHERE hashed_key = ANY($1)" }, - "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { + "4acb725974d006c388be8965c3dff2e4c538ab8d2366addb3fb8cff3b789f114": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "count!", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "attempts", - "ordinal": 1, - "type_info": "Int4" } ], "nullable": [ - false, - false + null ], "parameters": { "Left": [ - "Text", "Int8" ] } }, - "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " + "query": "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1" }, - "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { + "4bab972cbbd8b53237a840ba9307079705bd4b5270428d2b41f05ee3d2aa42af": { "describe": { "columns": [ { - "name": "value", + "name": "l1_batch_number!", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + }, + { + "name": "circuit_type", + "ordinal": 1, + "type_info": "Text" } ], "nullable": [ + null, false ], "parameters": { - "Left": [ - "Bytea" - ] + "Left": [] } }, - "query": "SELECT value FROM storage WHERE hashed_key = $1" + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number!\", circuit_type\n FROM prover_jobs\n WHERE aggregation_round = 0 AND (status = 'queued' OR status = 'in_progress'\n OR status = 'in_gpu_proof'\n OR status = 'failed')\n GROUP BY circuit_type\n " }, - "5b45825b92d6971d8b2fbad6eb68d24e1c666a54cbf1ceb1332e2039f9614d18": { + "4c0d2aa6e08f3b4748b88cad5cf7b3a9eb9c051e8e8e747a3c38c1b37ce3a6b7": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "TextArray" + "Int8" ] } }, - "query": "\n INSERT INTO contract_verification_zksolc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " + "query": "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1" + }, + "4c83881635e957872a435737392bfed829de58780887c9a0fa7921ea648296fb": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT 1" }, - "5b85d8bdf297f55e65978edda4a0966ded1dc0d24f4701e7b6048124f38b4cea": { + "4ca0356959e4cc50e09b6fe08e9d45cbd929601935506acbbade4a42c2eaea89": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray", - "ByteaArray", - "Int8" + "Int8", + "Bytea", + "Text" ] } }, - "query": "INSERT INTO factory_deps\n 
(bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT u.bytecode_hash, u.bytecode, $3, now(), now()\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n " + "query": "\n INSERT INTO scheduler_witness_jobs\n (l1_batch_number, scheduler_witness, scheduler_witness_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, 'waiting_for_artifacts', now(), now())\n " }, - "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { + "4d2e106c809a48ace74952df2b883a5e747aaa1bc6bee28e986dccee7fa130b6": { "describe": { "columns": [ { - "name": "l1_batch_number!", + "name": "nonce", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "aggregation_round", - "ordinal": 1, - "type_info": "Int4" } ], "nullable": [ - null, false ], "parameters": { "Left": [] } }, - "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " + "query": "SELECT nonce FROM eth_txs ORDER BY id DESC LIMIT 1" }, - "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { + "4d36aff2bdeb0b659b8c4cd031f7c3fc204d92bb500a4efe8b6beb9255a232f6": { "describe": { "columns": [ { - "name": "number", + "name": "timestamp", "ordinal": 0, "type_info": "Int8" } ], "nullable": [ - null + false ], "parameters": { "Left": [] } }, - "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" + "query": "SELECT timestamp FROM l1_batches WHERE eth_execute_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" }, - "5d1c3357b97f5e40a7e9d6fdcb7c3ebd8309e93f26e1c42d6371190f4aeaf8c6": { + "4d7b5a423b29ce07bd12f168d1ee707e6e413d9a4f0daafb4beed102d22d1745": { "describe": { "columns": [ { - "name": "min?", + "name": "address", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "max?", + "name": "key", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" } ], "nullable": [ - null, - null + false, + false ], "parameters": { "Left": [ @@ -4428,93 +3896,75 @@ ] } }, - "query": "\n SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\"\n FROM miniblocks\n WHERE l1_batch_number = $1\n " + "query": "\n SELECT address, key FROM protective_reads\n WHERE l1_batch_number = $1\n " }, - "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { + "4e2b733fea9ca7cef542602fcd80acf1a9d2e0f1e22566f1076c4837e3ac7e61": { "describe": { "columns": [ { - "name": "op_id", + "name": "id", "ordinal": 0, "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" - }, - "5f5974e7033eea82896a435c7776a6740f4a2df77175744a9670d3fee2f24b32": { - "describe": { - "columns": [ - { - "name": "address", - "ordinal": 0, - "type_info": "Bytea" }, { - "name": "topic1", + "name": "instance_host", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Inet" }, { - "name": "topic2", + "name": "instance_port", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic3", + "name": "instance_status", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "topic4", + "name": "created_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "value", + "name": "updated_at", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "block_hash", + "name": "processing_started_at", "ordinal": 
6, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_batch_number?", + "name": "queue_free_slots", "ordinal": 7, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "miniblock_number", + "name": "queue_capacity", "ordinal": 8, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "tx_hash", + "name": "specialized_prover_group_id", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Int2" }, { - "name": "tx_index_in_block", + "name": "region", "ordinal": 10, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "event_index_in_block", + "name": "zone", "ordinal": 11, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "event_index_in_tx", + "name": "num_gpu", "ordinal": 12, - "type_info": "Int4" + "type_info": "Int2" } ], "nullable": [ @@ -4524,23 +3974,83 @@ false, false, false, - null, - null, - false, - false, + true, + true, + true, + true, false, false, - false + true ], "parameters": { "Left": [ - "Bytea" + "Interval", + "Int2", + "Text", + "Text" ] } }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " + "query": "\n UPDATE gpu_prover_queue\n SET instance_status = 'reserved',\n updated_at = now(),\n processing_started_at = now()\n WHERE id in (\n SELECT id\n FROM gpu_prover_queue\n WHERE specialized_prover_group_id=$2\n AND region=$3\n AND zone=$4\n AND (\n instance_status = 'available'\n OR (instance_status = 'reserved' AND processing_started_at < now() - $1::interval)\n )\n ORDER BY updated_at ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING gpu_prover_queue.*\n " + }, + "4eefec8f46f9b8bae265230dab09ab66fde5f24b023c87726dbd856e782de986": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "\n UPDATE transactions\n SET effective_gas_price = max_fee_per_gas\n WHERE miniblock_number BETWEEN $1 AND $2\n AND is_priority = TRUE\n " + }, + "5049eaa4b2050312d13a02c06e87f96548a299894d0f0b268d4e91d49c536cb6": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int4Array", + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "NumericArray", + "Int4Array", + "Int4Array", + "VarcharArray", + "NumericArray", + "JsonbArray", + "ByteaArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "Int8" + ] + } + }, + "query": "\n UPDATE transactions\n SET \n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = now()\n 
FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::int[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::numeric[]) AS gas_limit,\n UNNEST($6::numeric[]) AS max_fee_per_gas,\n UNNEST($7::numeric[]) AS max_priority_fee_per_gas,\n UNNEST($8::numeric[]) AS gas_per_pubdata_limit,\n UNNEST($9::int[]) AS tx_format,\n UNNEST($10::integer[]) AS index_in_block,\n UNNEST($11::varchar[]) AS error,\n UNNEST($12::numeric[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::bigint[]) as refunded_gas,\n UNNEST($17::numeric[]) as value,\n UNNEST($18::bytea[]) as contract_address,\n UNNEST($19::bytea[]) as paymaster,\n UNNEST($20::bytea[]) as paymaster_input\n ) AS data_table\n WHERE transactions.initiator_address=data_table.initiator_address \n AND transactions.nonce=data_table.nonce\n " + }, + "5089dfb745ff04a9b071b5785e68194a6f6a7a72754d23a65adc7d6838f7f640": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1" }, - "604b41258da640307989571e014e8ccb4f457bba0caedcb42dc1065fc90f7950": { + "516e309a97010cd1eb8398b2b7ff809786703c075e4c3dff1133c41cdcfdd3f3": { "describe": { "columns": [ { @@ -4806,77 +4316,62 @@ ], "parameters": { "Left": [ - "Bytea", - "Bytea", "Int8" ] } }, - "query": "SELECT * FROM l1_batches\n WHERE eth_commit_tx_id IS NULL\n AND number != 0\n AND bootloader_code_hash = $1 AND default_aa_code_hash = $2\n AND commitment IS NOT NULL\n ORDER BY number LIMIT $3" - }, - "61f4f5ef369b2435732af17091493876301e3e59b68d6817fe0053c7da89291e": { - "describe": { - "columns": [ - { - "name": "max_nonce?", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT MAX(nonce) as \"max_nonce?\" FROM eth_txs" + "query": "SELECT * FROM l1_batches WHERE number = $1" }, - "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { + "51d788b5e8d808db143b6c057485f0a0b314a0c33e3eb2dff99ca0b32d12f8e4": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int4" + "Int8", + "Int2", + "Text", + "Int2", + "Int4", + "Int4", + "Bool" ] } }, - "query": "DELETE FROM eth_txs_history\n WHERE id = $1" + "query": "\n INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number)\n DO UPDATE SET updated_at=now()\n " }, - "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { + "52eeb8c529efb796fdefb30a381fcf6c931512f30e55e24c155f6c649e662909": { "describe": { "columns": [ { - "name": "status", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Text" - }, - { - "name": "error", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "compilation_errors", - "ordinal": 2, - "type_info": "Jsonb" + "type_info": "Int8" } ], "nullable": [ - false, - true, - true + false ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_dependency_tracker_fri\n SET status='queuing'\n WHERE l1_batch_number IN\n (SELECT l1_batch_number FROM scheduler_dependency_tracker_fri\n WHERE status != 'queued'\n AND circuit_1_final_prover_job_id IS NOT NULL\n AND 
circuit_2_final_prover_job_id IS NOT NULL\n AND circuit_3_final_prover_job_id IS NOT NULL\n AND circuit_4_final_prover_job_id IS NOT NULL\n AND circuit_5_final_prover_job_id IS NOT NULL\n AND circuit_6_final_prover_job_id IS NOT NULL\n AND circuit_7_final_prover_job_id IS NOT NULL\n AND circuit_8_final_prover_job_id IS NOT NULL\n AND circuit_9_final_prover_job_id IS NOT NULL\n AND circuit_10_final_prover_job_id IS NOT NULL\n AND circuit_11_final_prover_job_id IS NOT NULL\n AND circuit_12_final_prover_job_id IS NOT NULL\n AND circuit_13_final_prover_job_id IS NOT NULL\n )\n RETURNING l1_batch_number;\n " + }, + "53726a35b24a838df04c1f7201da322aab287830c96fc2c712a67d360bbc2bd0": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8" + "Int8", + "Text" ] } }, - "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " + "query": "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) VALUES ($1, $2, 'queued', now(), now())\n ON CONFLICT (l1_batch_number) DO NOTHING" }, - "65bf55ff4ac5c4ac60bedd7c5b39d82f6e8793859749a7b6ab56121f623ed840": { + "5543380548ce40063d43c1d54e368c7d385800d7ade9e720306808cc4c376978": { "describe": { "columns": [ { @@ -4885,265 +4380,214 @@ "type_info": "Int8" }, { - "name": "commit_gas?", + "name": "timestamp", "ordinal": 1, "type_info": "Int8" }, { - "name": "commit_base_gas_price?", + "name": "is_finished", "ordinal": 2, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "commit_priority_gas_price?", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "prove_gas?", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "prove_base_gas_price?", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "prove_priority_gas_price?", + "name": "bloom", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "execute_gas?", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Int8" + "type_info": "ByteaArray" }, { - "name": "execute_base_gas_price?", + "name": "hash", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "execute_priority_gas_price?", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int8" - } - ], - "nullable": [ - false, - true, - false, - false, - true, - false, - false, - true, - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT\n l1_batches.number,\n commit_tx_data.gas_used as \"commit_gas?\",\n commit_tx.base_fee_per_gas as \"commit_base_gas_price?\",\n commit_tx.priority_fee_per_gas as \"commit_priority_gas_price?\",\n prove_tx_data.gas_used as \"prove_gas?\",\n prove_tx.base_fee_per_gas as \"prove_base_gas_price?\",\n prove_tx.priority_fee_per_gas as \"prove_priority_gas_price?\",\n execute_tx_data.gas_used as \"execute_gas?\",\n execute_tx.base_fee_per_gas as \"execute_base_gas_price?\",\n execute_tx.priority_fee_per_gas as \"execute_priority_gas_price?\"\n FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as commit_tx_data\n ON (l1_batches.eth_commit_tx_id = commit_tx_data.id)\n LEFT JOIN eth_txs_history as prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as prove_tx_data\n 
ON (l1_batches.eth_prove_tx_id = prove_tx_data.id)\n LEFT JOIN eth_txs_history as execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs as execute_tx_data\n ON (l1_batches.eth_execute_tx_id = execute_tx_data.id)\n WHERE l1_batches.number = $1\n " - }, - "66a3761aec92aa8794e55ddd8299879e915e8ef84f8be9ebca9881c77438d2c8": { - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "value", - "ordinal": 0, + "name": "commitment", + "ordinal": 10, "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n SELECT value FROM storage_logs\n WHERE hashed_key = $1 AND miniblock_number <= $2\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n " - }, - "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Time", - "Bytea", - "Text", - "Int8" - ] - } - }, - "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " - }, - "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { - "describe": { - "columns": [ + }, { - "name": "total_transactions!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " - }, - "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { - "describe": { - "columns": [ + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, { - "name": "l2_to_l1_logs", - "ordinal": 0, - "type_info": "ByteaArray" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" - }, - "67efc7ea5bd3821d8325759ed8357190f6122dd2ae503a57faf15d8b749a4361": { - "describe": { - "columns": [ + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " - }, - "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { - "describe": { - "columns": [ + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" }, { - "name": "data?", - "ordinal": 1, - "type_info": "Jsonb" + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" }, { - "name": "contract_address?", - "ordinal": 2, + "name": "created_at", + "ordinal": 16, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 
18, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false, - true - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " - }, - "6c81c5a55d595d0790ac20ca202ff3083b0677c47872f2eb1c65e568dd7c156a": { - "describe": { - "columns": [ + }, { - "name": "miniblock_number", - "ordinal": 0, + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, "type_info": "Int8" }, { - "name": "log_index_in_miniblock", - "ordinal": 1, - "type_info": "Int4" + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" }, { - "name": "log_index_in_tx", - "ordinal": 2, - "type_info": "Int4" + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" }, { - "name": "tx_hash", - "ordinal": 3, + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, "type_info": "Bytea" }, { - "name": "block_hash", - "ordinal": 4, + "name": "compressed_repeated_writes", + "ordinal": 27, "type_info": "Bytea" }, { - "name": "l1_batch_number?", - "ordinal": 5, - "type_info": "Int8" + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" }, { - "name": "shard_id", - "ordinal": 6, + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, "type_info": "Int4" }, { - "name": "is_service", - "ordinal": 7, + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 32, "type_info": "Bool" }, { - "name": "tx_index_in_miniblock", - "ordinal": 8, - "type_info": "Int4" + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" }, { - "name": "tx_index_in_l1_batch", - "ordinal": 9, - "type_info": "Int4" + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" }, { - "name": "sender", - "ordinal": 10, + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 36, + "type_info": "Int8" + }, + { + "name": "aux_data_hash", + "ordinal": 37, "type_info": "Bytea" }, { - "name": "key", - "ordinal": 11, + "name": "pass_through_data_hash", + "ordinal": 38, "type_info": "Bytea" }, { - "name": "value", - "ordinal": 12, + "name": "meta_parameters_hash", + "ordinal": 39, "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" } ], "nullable": [ @@ -5151,41 +4595,71 @@ false, false, false, - null, - null, - false, false, false, false, false, - false, - false + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + false, 
+ false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + false, + false, + false ], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "\n SELECT\n miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value\n FROM l2_to_l1_logs\n WHERE tx_hash = $1\n ORDER BY log_index_in_tx ASC\n " + "query": "SELECT * FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" }, - "6d923b755e1762ebc499cf2c6d7e894357e7b55f3342be08071e2be183ad2a00": { + "5563da0d52ca7310ae7bc957caa5d8b3dcbd9386bb2a0be68dcd21ebb044cdbd": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "bytecode_hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "merkel_tree_paths_blob_url", + "name": "bytecode", "ordinal": 1, - "type_info": "Text" + "type_info": "Bytea" } ], "nullable": [ false, - true + false ], "parameters": { "Left": [ @@ -5193,54 +4667,39 @@ ] } }, - "query": "\n SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND merkel_tree_paths_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " - }, - "6de96eb86301418de9a4342cd66447afd6eb42759d36e164e36adddbd42e98e2": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE execute_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + "query": "SELECT bytecode_hash, bytecode FROM factory_deps INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number WHERE miniblocks.l1_batch_number = $1" }, - "6ebe0d6a315050d72ffead2dd695f0ba1926a3f4a1ed56b3f291d0f41b72c4d4": { + "55debba852ef32f3b5ba6ffcb745f7b59d6888a21cb8792f8f9027e3b164a245": { "describe": { "columns": [ { - "name": "hashed_key!", + "name": "region", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value?", + "name": "zone", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Text" + }, + { + "name": "total_gpus", + "ordinal": 2, + "type_info": "Int8" } ], "nullable": [ - null, + false, + false, null ], "parameters": { - "Left": [ - "ByteaArray", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT u.hashed_key as \"hashed_key!\",\n (SELECT value FROM storage_logs\n WHERE hashed_key = u.hashed_key AND miniblock_number < $2\n ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\"\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n " + "query": "\n SELECT region, zone, SUM(num_gpu) AS total_gpus\n FROM gpu_prover_queue\n GROUP BY region, zone\n " }, - "6f9edffc50202b888d12f80e57a2a346d865e522aa5a02fe3fcfa155406227a4": { + "560f088f500d3c369453453b2e5903253eee00a49690c309ab7f3a0131a0a467": { "describe": { "columns": [ { @@ -5464,116 +4923,146 @@ ] } }, - "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n 
ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" + "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n FOR UPDATE\n ) as subquery\n WHERE transactions.hash = subquery.hash\n RETURNING transactions.*" }, - "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { + "57742ed088179b89b50920a2ab1a103b745598ee0ba05d1793fc54e63b477319": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray", + "Int4", + "Int8", "Int8" ] } }, - "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " + "query": "UPDATE l1_batches SET eth_commit_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, - "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { + "57b4e8fb728f1e90dc5ed80c1493471f8e9eff828c99eadc531b28a068ade83e": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "count!", "ordinal": 0, "type_info": "Int8" }, { - "name": "basic_circuits_blob_url", + "name": "circuit_type!", "ordinal": 1, "type_info": "Text" }, { - "name": "basic_circuits_inputs_blob_url", + "name": "status!", "ordinal": 2, "type_info": "Text" } ], "nullable": [ + null, false, - true, - true + false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "\n SELECT COUNT(*) as \"count!\", circuit_type as \"circuit_type!\", status as \"status!\"\n FROM prover_jobs\n GROUP BY circuit_type, status\n " }, - "7229ddaadb494c5723946a1e917840eb6035b7d0923518aac7ba2fb81c711d7b": { + "580d973b404123108e8e8b27cd754f108a289e1556da10a466e4c795fbd23ddf": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Int8", - "Bytea", "Int4", - "Int4", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea" + "Int4" ] } }, - "query": "\n INSERT INTO miniblocks (\n number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())\n " + "query": "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now()\n WHERE id = $1 AND sent_at_block IS NULL" }, - "766119f845a7a11b6a5bb2a29bab32e2890df772b13e1a378222e089736fd3bf": { + "59b10abd699d19cbdf285334162ee40f294c5fad8f99fc00a4cdb3b233a494d6": { "describe": { "columns": [ { - "name": "number!", + "name": "tx_hash", "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COALESCE(max(number), 0) as \"number!\" FROM l1_batches\n WHERE eth_prove_tx_id IS NOT NULL" - }, - "769c021b51b9aaafdf27b4019834729047702b17b0684f7271eecd6ffdf96e7c": 
{ - "describe": { - "columns": [ + "type_info": "Bytea" + }, { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" + "name": "topic2!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "topic3!", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "value!", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "l1_address!", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "l2_address!", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "symbol!", + "ordinal": 6, + "type_info": "Varchar" + }, + { + "name": "name!", + "ordinal": 7, + "type_info": "Varchar" + }, + { + "name": "decimals!", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" } ], "nullable": [ - false + false, + false, + false, + false, + false, + false, + false, + false, + false, + true ], "parameters": { - "Left": [] + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number\n WHERE swj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 2\n GROUP BY prover_jobs.l1_batch_number\n HAVING COUNT(*) = 1)\n RETURNING l1_batch_number;\n " + "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON\n events.topic4 = ('\\x000000000000000000000000'::bytea || tokens.l2_address)\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " }, - "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { + "5a27a65fa105897b60a99c1e0015e4b8c93c45e0c448e77b03565db5c36695ed": { "describe": { "columns": [ { - "name": "count!", + "name": "max", "ordinal": 0, "type_info": "Int8" } @@ -5585,33 +5074,40 @@ "Left": [] } }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " + "query": "SELECT MAX(l1_batch_number) FROM witness_inputs WHERE merkel_tree_paths_blob_url IS NOT NULL" }, - "7b90e1c16196f0ee29d7278689fe0ac0169093a11b95edf97c729370fadcb73e": { + "5a5844af61cc685a414fcd3cad70900bdce8f48e905c105f8dd50dc52e0c6f14": { "describe": { "columns": [ { "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "attempts", + "ordinal": 1, + "type_info": "Int4" } ], "nullable": [ + false, false ], "parameters": { "Left": [ - "Bytea" + "Text", + "Int8" ] } }, - "query": "\n SELECT l1_batch_number FROM initial_writes\n WHERE hashed_key = $1\n " + "query": "\n UPDATE prover_jobs\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n RETURNING l1_batch_number, attempts\n " }, - "7ca78be8b18638857111cdbc6117ed2c204e3eb22682d5e4553ac4f47efab6e2": { + "5ac872e2c5a00b376cc053324b3776ef6a0bb7f6850e5a24a133dfee052c49e1": { "describe": { "columns": [ { - "name": "hash", + "name": "value", "ordinal": 0, "type_info": "Bytea" } @@ -5621,320 +5117,292 @@ ], "parameters": { "Left": [ - "Int8" - ] - } - }, - "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, 
index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1\n RETURNING hash\n " - }, - "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Timestamp" + "Bytea" ] } }, - "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" + "query": "SELECT value FROM storage WHERE hashed_key = $1" }, - "7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { + "5b2935b5b7e8c2907f5e221a6b1e6f4b8737b9fc618c5d021a3e1d58a3aed116": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Jsonb" + "Text", + "Int8" ] } }, - "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'failed', error = $1, updated_at = now()\n WHERE id = $2\n " }, - "7d4210089c5abb84befec962fc769b396ff7ad7da212d079bd4460f9ea4d60dc": { + "5bc8a41ae0f255b966df2102f1bd9059d55833e0afaf6e62c7ddcc9c06de8deb": { "describe": { "columns": [ { - "name": "l1_batch_number?", + "name": "l1_batch_number!", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "aggregation_round", + "ordinal": 1, + "type_info": "Int4" } ], "nullable": [ - null + null, + false ], "parameters": { "Left": [] } }, - "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\" FROM (\n SELECT MIN(l1_batch_number) as \"l1_batch_number\"\n FROM prover_jobs\n WHERE status = 'successful' OR aggregation_round < 3\n GROUP BY l1_batch_number\n HAVING MAX(aggregation_round) < 3\n ) as inn\n " + "query": "SELECT MAX(l1_batch_number) as \"l1_batch_number!\", aggregation_round FROM prover_jobs \n WHERE status='successful'\n GROUP BY aggregation_round \n " }, - "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { + "5bc8cdc7ed710bb2f9b0035654fd7e9dcc01731ca581c6aa75d55184817bc100": { "describe": { "columns": [ { "name": "number", "ordinal": 0, "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL" + }, + "5df806b33f84893d4ddfacf3b289b0e173e85ad9204cbb7ad314e68a94cdc41e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int2", + "Int4", + "Int4" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET aggregations_url = $1, number_of_dependent_jobs = $5, updated_at = now()\n WHERE l1_batch_number = $2\n AND circuit_id = $3\n AND depth = $4\n " + }, + "5e09f2359dd69380c1f183f613d82696029a56896e2b985738a2fa25d6cb8a71": { + "describe": { + "columns": [ + { + "name": "op_id", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT MAX(priority_op_id) as \"op_id\" from transactions where is_priority = true" + }, + "5f037f6ae8489d5224772d4f9e3e6cfc2075560957fa491d97a95c0e79ff4830": { + "describe": { + "columns": [ + { + "name": "block_batch?", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "timestamp", + "name": "max_batch?", "ordinal": 1, "type_info": "Int8" + } + ], + "nullable": [ + null, + null + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT (SELECT l1_batch_number FROM miniblocks WHERE number = $1) as \"block_batch?\", (SELECT MAX(number) + 1 FROM 
l1_batches) as \"max_batch?\"" + }, + "5f4b1091b74424ffd20c0aede98287418afa2bb37dbc941200c1d6190c96bec5": { + "describe": { + "columns": [ + { + "name": "timestamp", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT timestamp FROM l1_batches WHERE eth_commit_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" + }, + "5f5974e7033eea82896a435c7776a6740f4a2df77175744a9670d3fee2f24b32": { + "describe": { + "columns": [ + { + "name": "address", + "ordinal": 0, + "type_info": "Bytea" }, { - "name": "is_finished", + "name": "topic1", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "topic2", "ordinal": 2, - "type_info": "Bool" + "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "topic3", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "topic4", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "value", "ordinal": 5, "type_info": "Bytea" }, { - "name": "bloom", + "name": "block_hash", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "l1_batch_number?", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Int8" }, { - "name": "hash", + "name": "miniblock_number", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "parent_hash", + "name": "tx_hash", "ordinal": 9, "type_info": "Bytea" }, { - "name": "commitment", + "name": "tx_index_in_block", "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, "type_info": "Int4" }, { - "name": "eth_commit_tx_id", - "ordinal": 14, + "name": "event_index_in_block", + "ordinal": 11, "type_info": "Int4" }, { - "name": "eth_execute_tx_id", - "ordinal": 15, + "name": "event_index_in_tx", + "ordinal": 12, "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + null, + null, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " + }, + "62e8b4afd4df9e30bfa08cb30c74ba4566fa2e9f4934b7a2777f9e90b49e8fce": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4" + ] + } + }, + "query": "DELETE FROM eth_txs_history\n WHERE id = $1" + }, + "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": { + "describe": { + "columns": [ { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, + "name": "version", + "ordinal": 0, + "type_info": "Text" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + 
"Text" + ] + } + }, + "query": "SELECT version FROM compiler_versions WHERE compiler = $1 ORDER by version" + }, + "64b1bce209f43ee9f8294a270047cd58c20b973d8fef29c662742cad89363ffe": { + "describe": { + "columns": [ { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" + "name": "status", + "ordinal": 0, + "type_info": "Text" }, { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" + "name": "error", + "ordinal": 1, + "type_info": "Text" }, { - "name": "initial_bootloader_heap_content", - "ordinal": 24, + "name": "compilation_errors", + "ordinal": 2, "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, - { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" - }, - { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, - { - "name": "bootloader_code_hash", - "ordinal": 33, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" - }, - { - "name": "aux_data_hash", - "ordinal": 37, - "type_info": "Bytea" - }, - { - "name": "pass_through_data_hash", - "ordinal": 38, - "type_info": "Bytea" - }, - { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" } ], "nullable": [ false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, true, - false, - false, - false + true ], "parameters": { "Left": [ @@ -5942,457 +5410,1763 @@ ] } }, - "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" + "query": "\n SELECT status, error, compilation_errors FROM contract_verification_requests\n WHERE id = $1\n " + }, + "657e576ab02338ce40ae905acdbc1d372f4c1b4c50f8690a23e04824716b8674": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO miniblocks (number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())" }, - "831e1beb42dab1dc4e9b585bb35ce568196e7f46cb655357fdf5437ece519270": { + "665112c83ed7f126f94d1c47408de3495ee6431970e334d94ae75f853496eb48": { "describe": { 
"columns": [], "nullable": [], "parameters": { "Left": [ + "Text", "Int8" ] } }, - "query": "\n UPDATE miniblocks\n SET l1_batch_number = $1\n WHERE l1_batch_number IS NULL\n " + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " }, - "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { + "67a47f1e7d5f8dafcef94bea3f268b4baec1888c6ef11c92ab66480ecdcb9aef": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "Time", + "Bytea", + "Text", "Int8" ] } }, - "query": "DELETE FROM l1_batches WHERE number > $1" + "query": "\n UPDATE prover_jobs\n SET status = 'successful', updated_at = now(), time_taken = $1, result = $2, proccesed_by = $3\n WHERE id = $4\n " }, - "88c49ebeb45f7208d223de59ec08a332beac765644e4f29ed855808b8f9cef91": { + "67b861c97d16bf99a2d305c100116cbcb0334473c4462e4164436885481197fb": { "describe": { "columns": [ { - "name": "id", + "name": "total_transactions!", "ordinal": 0, "type_info": "Int8" - }, - { - "name": "circuit_input_blob_url", - "ordinal": 1, - "type_info": "Text" } ], "nullable": [ - false, - true + null ], "parameters": { "Left": [ - "Int8" + "Bytea" ] } }, - "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + "query": "\n SELECT COUNT(*) as \"total_transactions!\"\n FROM transactions\n WHERE contract_address = $1\n " }, - "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { + "67ecdc69e39e689f1f23f867d31e6b8c47e9c041e18cbd84a2ad6482a9be4e74": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "l2_to_l1_logs", + "ordinal": 0, + "type_info": "ByteaArray" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ - "Int4", - "Int8", "Int8" ] } }, - "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1" }, - "8a5adf70b154ced83daf6bd085203762380afab2363fa65ff5b7f9df22f48616": { + "67efc7ea5bd3821d8325759ed8357190f6122dd2ae503a57faf15d8b749a4361": { "describe": { "columns": [ { - "name": "hash", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " + }, + "697835cdd5be1b99a0f332c4c8f3245e317b0282b46e55f15e728a7642382b25": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "is_priority", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "full_fee", + "name": "circuit_id", "ordinal": 2, - "type_info": "Numeric" + "type_info": "Int2" }, { - "name": "layer_2_tip_fee", + "name": "aggregation_round", "ordinal": 3, - "type_info": "Numeric" + "type_info": "Int2" }, { - "name": 
"initiator_address", + "name": "sequence_number", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "nonce", + "name": "depth", "ordinal": 5, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "signature", + "name": "is_node_final_proof", "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, - { - "name": "priority_op_id", + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Time", + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1, proof_blob_url=$2\n WHERE id = $3\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + }, + "6ac39e83e446e70a2875624db78a05e56eb35f46e11d0f2fbb2165cda56fbacd": { + "describe": { + "columns": [ + { + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "data?", + "ordinal": 1, + "type_info": "Jsonb" + }, + { + "name": "contract_address?", + "ordinal": 2, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + true + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT factory_deps.bytecode, transactions.data as \"data?\", transactions.contract_address as \"contract_address?\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY miniblock_number DESC, operation_number DESC\n LIMIT 1\n ) storage_logs\n JOIN factory_deps ON factory_deps.bytecode_hash = storage_logs.value\n LEFT JOIN transactions ON transactions.hash = storage_logs.tx_hash\n WHERE storage_logs.value != $2\n " + }, + "715aba794d60ce2faf937eacd9498b203dbb8e620d6d8850b9071cd72902ffbf": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "Int8" + ] + } + }, + "query": "INSERT INTO factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) SELECT u.bytecode_hash, u.bytecode, $3, now(), now() FROM UNNEST($1::bytea[], $2::bytea[]) AS u(bytecode_hash, bytecode) ON CONFLICT (bytecode_hash) DO NOTHING" + }, + "71df95e25f719ed9bc32622b33c1da0aad14c6ad1a96f25454ce8618470c2ea3": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int8" + ] + } + }, + "query": "INSERT INTO initial_writes (hashed_key, l1_batch_number, created_at, updated_at)\n SELECT u.hashed_key, $2, now(), now()\n FROM UNNEST($1::bytea[]) AS u(hashed_key)\n ON CONFLICT (hashed_key) DO NOTHING\n " + }, + "721367902328f9e2e5f8a99820b11d230c60553db366fc76f97c5680470bece8": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "basic_circuits_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "basic_circuits_inputs_blob_url", + "ordinal": 2, + "type_info": "Text" + } + ], + "nullable": [ + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND 
basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + }, + "741b13b0a4769a30186c650a4a1b24855806a27ccd8d5a50594741842dde44ec": { + "describe": { + "columns": [ + { + "name": "min?", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "max?", + "ordinal": 1, + "type_info": "Int8" + } + ], + "nullable": [ + null, + null + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\" FROM miniblocks WHERE l1_batch_number = $1" + }, + "751c8e5ed1fc211dbb4c7419a316c5f4e49a7f0b4f3a5c74c2abd8daebc457dd": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT l1_batch_number FROM miniblocks WHERE number = $1" + }, + "769c021b51b9aaafdf27b4019834729047702b17b0684f7271eecd6ffdf96e7c": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN scheduler_witness_jobs swj ON prover_jobs.l1_batch_number = swj.l1_batch_number\n WHERE swj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 2\n GROUP BY prover_jobs.l1_batch_number\n HAVING COUNT(*) = 1)\n RETURNING l1_batch_number;\n " + }, + "7717652bb4933f87cbeb7baa2e70e8e0b439663c6b15493bd2e406bed2486b42": { + "describe": { + "columns": [ + { + "name": "max", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Float8" + ] + } + }, + "query": "SELECT max(l1_batches.number) FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1" + }, + "7889294ffe999d3c8b3b093d3add7f9b826e8259451068aeaeca0da0772648e8": { + "describe": { + "columns": [ + { + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT COUNT(*) as \"count!\"\n FROM contract_verification_requests\n WHERE status = 'queued'\n " + }, + "79420f7676acb3f17aeb538271cdb4067a342fd554adcf7bd0550b6682b4c82b": { + "describe": { + "columns": [ + { + "name": "tx_hash", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "call_trace", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT * FROM call_traces WHERE tx_hash IN (SELECT hash FROM transactions WHERE miniblock_number = $1)" + }, + "7acba1f016450b084a5fd97199a757a471f8b8a880a800c29737f1bceae3ff46": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "merkel_tree_paths_blob_url", + "ordinal": 1, + "type_info": "Text" + } + ], + "nullable": [ + false, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs WHERE 
status = 'successful' AND is_blob_cleaned = FALSE AND merkel_tree_paths_blob_url is NOT NULL AND updated_at < NOW() - INTERVAL '30 days' LIMIT $1" + }, + "7bbb3ba8c9860818d04bad46dee94f59d054619c961fd3d59d26fcb364598d5d": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text", + "Int4" + ] + } + }, + "query": "\n INSERT INTO leaf_aggregation_witness_jobs_fri\n (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id)\n DO UPDATE SET updated_at=now()\n " + }, + "7ca78be8b18638857111cdbc6117ed2c204e3eb22682d5e4553ac4f47efab6e2": { + "describe": { + "columns": [ + { + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "UPDATE transactions\n SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}'\n WHERE miniblock_number > $1\n RETURNING hash\n " + }, + "7cf855c4869db43b765b92762402596f6b97b3717735b6d87a16a5776f2eca71": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Numeric", + "Timestamp" + ] + } + }, + "query": "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1" + }, + "7d3a57126f111ebe51d678b91f64c34b8394df3e7b1d59ca80b6eca01c606da4": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Jsonb" + ] + } + }, + "query": "\n INSERT INTO contracts_verification_info\n (address, verification_info)\n VALUES ($1, $2)\n ON CONFLICT (address)\n DO UPDATE SET verification_info = $2\n " + }, + "7d4210089c5abb84befec962fc769b396ff7ad7da212d079bd4460f9ea4d60dc": { + "describe": { + "columns": [ + { + "name": "l1_batch_number?", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "\n SELECT MIN(l1_batch_number) as \"l1_batch_number?\" FROM (\n SELECT MIN(l1_batch_number) as \"l1_batch_number\"\n FROM prover_jobs\n WHERE status = 'successful' OR aggregation_round < 3\n GROUP BY l1_batch_number\n HAVING MAX(aggregation_round) < 3\n ) as inn\n " + }, + "7df997e5a203e8df350b1346863fddf26d32123159213c02e8794c39240e48dc": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "UPDATE miniblocks SET l1_batch_number = $1 WHERE l1_batch_number IS NULL" + }, + "7e3623674226e5bb934f7769cdf595138015ad346e12074398fd57dbc03962d3": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "is_finished", + "ordinal": 2, + "type_info": "Bool" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "fee_account_address", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "bloom", + "ordinal": 6, + "type_info": "Bytea" + }, + { + "name": "priority_ops_onchain_data", + "ordinal": 7, + "type_info": "ByteaArray" + }, + { + "name": "hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "parent_hash", + "ordinal": 9, + "type_info": "Bytea" + }, + { + "name": "commitment", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": 
"compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, + "type_info": "Int8" + }, + { + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" + }, + { + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" + }, + { + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, + { + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "compressed_repeated_writes", + "ordinal": 27, + "type_info": "Bytea" + }, + { + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, + "type_info": "Int4" + }, + { + "name": "rollup_last_leaf_index", + "ordinal": 31, + "type_info": "Int8" + }, + { + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 33, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" + }, + { + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 36, + "type_info": "Int8" + }, + { + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" + }, + { + "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" + }, + { + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" + }, + { + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" + }, + { + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 42, + "type_info": "Int8" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL ORDER BY number LIMIT $1" + }, + "8045a697a6a1070857b6fdc656f60ee6bab4b3a875ab98099beee227c199f818": { + "describe": { + "columns": [ + { + "name": "miniblock_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "log_index_in_miniblock", + "ordinal": 1, + "type_info": "Int4" + }, + { + "name": "log_index_in_tx", + "ordinal": 2, + "type_info": "Int4" + }, + { + "name": 
"tx_hash", + "ordinal": 3, + "type_info": "Bytea" + }, + { + "name": "block_hash", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "l1_batch_number?", + "ordinal": 5, + "type_info": "Int8" + }, + { + "name": "shard_id", + "ordinal": 6, + "type_info": "Int4" + }, + { + "name": "is_service", + "ordinal": 7, + "type_info": "Bool" + }, + { + "name": "tx_index_in_miniblock", + "ordinal": 8, + "type_info": "Int4" + }, + { + "name": "tx_index_in_l1_batch", + "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "sender", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "key", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "value", + "ordinal": 12, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false, + false, + null, + null, + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\", shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value FROM l2_to_l1_logs WHERE tx_hash = $1 ORDER BY log_index_in_tx ASC" + }, + "84b6ac6bc44503de193e0e4e1201ffd200eddf690722659dad6ddea0604427dc": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "depth", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "status", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 5, + "type_info": "Int2" + }, + { + "name": "aggregations_url", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 8, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "created_at", "ordinal": 10, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "number_of_dependent_jobs", + "ordinal": 12, + "type_info": "Int4" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false, + true + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM node_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, depth ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING node_aggregation_witness_jobs_fri.*\n " + }, + "85c52cb09c73499507144e3a684c3230c2c71eb4f8ddef43e67fbd33de2747c8": { + "describe": { + "columns": [ + { + "name": "timestamp", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT timestamp, hash FROM l1_batches WHERE number = $1" + }, + "87e1ae393bf250f834704c940482884c9ed729a24f41d1ec07319fa0cbcc21a7": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM l1_batches WHERE number > $1" + }, + 
"88c49ebeb45f7208d223de59ec08a332beac765644e4f29ed855808b8f9cef91": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "circuit_input_blob_url", + "ordinal": 1, + "type_info": "Text" + } + ], + "nullable": [ + false, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " + }, + "89b124c78f4f6e86790af8ec391a2c486ce01b33cfb4492a443187b1731cae1e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET eth_prove_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + }, + "8a05b6c052ace9b5a383b301f3f441536d90a96bbb791f4711304b22e02193df": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Int8" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE id = $2\n " + }, + "8a35349a1aa79ac111e442df2cf3f31ecbebe3de7763554b5beb2210ebaa4dc6": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "l1_tx_count", + "ordinal": 3, + "type_info": "Int4" + }, + { + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Numeric" + }, + { + "name": "l1_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 9, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash FROM miniblocks WHERE number = $1" + }, + "8b881a834dc813ac5bd4dcd2f973d34ae92cafa929ce933982704d4afe13f972": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, "type_info": "Int8" }, { - "name": "l1_batch_number", - "ordinal": 11, + "name": "l1_batch_number!", + "ordinal": 1, "type_info": "Int8" }, { - "name": "index_in_block", - "ordinal": 12, + "name": "timestamp", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "l1_tx_count", + "ordinal": 3, "type_info": "Int4" }, { - "name": "error", + "name": "l2_tx_count", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "root_hash?", + "ordinal": 5, + "type_info": "Bytea" + }, + { + "name": "commit_tx_hash?", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "committed_at?", + "ordinal": 7, + "type_info": "Timestamp" + }, + { + "name": "prove_tx_hash?", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "proven_at?", + "ordinal": 9, + "type_info": "Timestamp" + }, + { + "name": "execute_tx_hash?", + "ordinal": 10, + "type_info": "Text" + }, + { + "name": "executed_at?", + "ordinal": 11, + "type_info": "Timestamp" + }, + { + "name": "l1_gas_price", + "ordinal": 12, 
+ "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", "ordinal": 13, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "gas_limit", + "name": "bootloader_code_hash", "ordinal": 14, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_storage_limit", + "name": "default_aa_code_hash", "ordinal": 15, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "fee_account_address?", "ordinal": 16, - "type_info": "Numeric" + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + false, + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + }, + "8d3c9575e3cea3956ba84edc982fcf6e0f7667350e6c2cd6801db8400eabaf9b": { + "describe": { + "columns": [ + { + "name": "hashed_key", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT DISTINCT ON (hashed_key) hashed_key FROM (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn" + }, + "8d48fb84bd08f6103fe28d13331f4e3422b61adab6037e8760b0ca7b1a48907e": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" + "name": "scheduler_partial_input_blob_url", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "status", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "processing_started_at", + "ordinal": 3, + "type_info": "Timestamp" + }, + { + "name": "time_taken", + "ordinal": 4, + "type_info": "Time" + }, + { + "name": "error", + "ordinal": 5, + "type_info": "Text" }, { "name": "created_at", - "ordinal": 18, + "ordinal": 6, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 19, + "ordinal": 7, "type_info": "Timestamp" }, { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, + "name": "attempts", + "ordinal": 8, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false, + true, + true, + true, + false, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'in_progress', attempts 
= attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs_fri.*\n " + }, + "8dcbaaa6186da52ca8b440b6428826288dc668af5a6fc99ef3078c8bcb38c419": { + "describe": { + "columns": [ { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" + "name": "circuit_id", + "ordinal": 1, + "type_info": "Int2" }, { - "name": "l1_block_number", - "ordinal": 23, + "name": "depth", + "ordinal": 2, "type_info": "Int4" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status='queued'\n WHERE (l1_batch_number, circuit_id, depth) IN\n (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth\n FROM prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs\n HAVING COUNT(*) = nawj.number_of_dependent_jobs)\n RETURNING l1_batch_number, circuit_id, depth;\n " + }, + "8de48960815f48f5d66e82b770a2e0caee42261643ec535a8f21cba1b5d4f50d": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" }, { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" }, { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" }, { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" + "name": "closed_form_inputs_blob_url", + "ordinal": 3, + "type_info": "Text" }, { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" + "name": "attempts", + "ordinal": 4, + "type_info": "Int2" + }, + { + "name": "status", + "ordinal": 5, + "type_info": "Text" }, { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" + "name": "error", + "ordinal": 6, + "type_info": "Text" }, { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" + "name": "created_at", + "ordinal": 7, + "type_info": "Timestamp" }, { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" + "name": "updated_at", + "ordinal": 8, + "type_info": "Timestamp" }, { - "name": "l1_batch_tx_index", - "ordinal": 31, - "type_info": "Int4" + "name": "processing_started_at", + "ordinal": 9, + "type_info": "Timestamp" }, { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" + "name": "time_taken", + "ordinal": 10, + "type_info": "Time" }, { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" + "name": "is_blob_cleaned", + "ordinal": 11, + "type_info": "Bool" }, { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" + "name": "number_of_basic_circuits", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ false, false, - true, - true, - false, - true, - true, - 
true, - false, false, true, - true, - true, - true, - true, - true, - true, - true, false, false, - false, - true, - false, true, false, false, - false, true, true, true, - true, - true, - false, - true, true ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM leaf_aggregation_witness_jobs_fri\n WHERE status = 'queued'\n ORDER BY l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING leaf_aggregation_witness_jobs_fri.*\n " + }, + "8fa1a390d7b11b60b3352fafc0a8a7fa15bc761b1bb902f5105fd66b2e3087f2": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "\n SELECT * FROM transactions\n WHERE miniblock_number = $1\n ORDER BY index_in_block\n " + "query": "\n INSERT INTO scheduler_dependency_tracker_fri\n (l1_batch_number, status, created_at, updated_at)\n VALUES ($1, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " }, - "8b881a834dc813ac5bd4dcd2f973d34ae92cafa929ce933982704d4afe13f972": { + "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { "describe": { "columns": [ { - "name": "number", + "name": "l1_address", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_address FROM tokens WHERE well_known = false" + }, + "9008367aad7877f269b765c4d0772d0f60689fcde6987c620fe5749a259a8db7": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8", + "Text", + "Bytea" + ] + } + }, + "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" + }, + "908f10640f805957e3f77ed685a7170345d835166e1857c12d76c15b09dffff5": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Int4", + "Text", + "Int4" + ] + } + }, + "query": "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number, circuit_id, depth)\n DO UPDATE SET updated_at=now()" + }, + "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { + "describe": { + "columns": [ + { + "name": "value", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8" + ] + } + }, + "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " + }, + "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" }, { - "name": "l1_batch_number!", + "name": "nonce", "ordinal": 1, "type_info": "Int8" }, { - "name": "timestamp", + "name": "raw_tx", "ordinal": 2, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "contract_address", 
"ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "l2_tx_count", + "name": "tx_type", "ordinal": 4, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "root_hash?", + "name": "gas_used", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "commit_tx_hash?", + "name": "created_at", "ordinal": 6, - "type_info": "Text" + "type_info": "Timestamp" }, { - "name": "committed_at?", + "name": "updated_at", "ordinal": 7, "type_info": "Timestamp" }, { - "name": "prove_tx_hash?", + "name": "has_failed", "ordinal": 8, - "type_info": "Text" + "type_info": "Bool" }, { - "name": "proven_at?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "execute_tx_hash?", + "name": "confirmed_eth_tx_history_id", "ordinal": 10, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "executed_at?", + "name": "predicted_gas_cost", "ordinal": 11, - "type_info": "Timestamp" - }, - { - "name": "l1_gas_price", - "ordinal": 12, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 13, "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 14, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 15, - "type_info": "Bytea" - }, + } + ], + "nullable": [ + false, + false, + false, + false, + false, + true, + false, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Text", + "Text", + "Int8" + ] + } + }, + "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" + }, + "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" + ] + } + }, + "query": "\n UPDATE prover_jobs\n SET is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " + }, + "96b1cd2bb6861064b633d597a4a09d279dbc7bcd7a810a7270da3d7941af0fff": { + "describe": { + "columns": [ { - "name": "fee_account_address?", - "ordinal": 16, - "type_info": "Bytea" + "name": "count!", + "ordinal": 0, + "type_info": "Int8" } ], "nullable": [ - false, - null, - false, - false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true, - false + null ], "parameters": { "Left": [ - "Int8" + "Bytea", + "Bytea" ] } }, - "query": "\n SELECT miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash as \"root_hash?\",\n commit_tx.tx_hash as \"commit_tx_hash?\",\n commit_tx.confirmed_at as \"committed_at?\",\n prove_tx.tx_hash as \"prove_tx_hash?\",\n prove_tx.confirmed_at as \"proven_at?\",\n execute_tx.tx_hash as \"execute_tx_hash?\",\n execute_tx.confirmed_at as \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as 
execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE miniblocks.number = $1\n " + "query": "SELECT COUNT(*) as \"count!\" FROM (SELECT * FROM storage_logs WHERE storage_logs.hashed_key = $1 ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC LIMIT 1) sl WHERE sl.value != $2" + }, + "96f6d06a49646f93ba1918080ef1efba868d506c6b51ede981e610f1b57bf88b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM storage WHERE hashed_key = ANY($1)" }, - "8b96fbf5b8adabd76ea2648688c38c4d9917b3736ca53ed3896c35c0da427369": { + "9b4d87f7d7cabe0d61f10d26bb856cce3dc7f36f521efbb6992d98937e5a91ba": { "describe": { "columns": [ { - "name": "bytecode_hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode", - "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int8" } ], "nullable": [ - false, false ], "parameters": { "Left": [ - "Int8" + "Bytea", + "Text", + "Text", + "Text", + "Text", + "Bool", + "Text", + "Bytea", + "Bool" ] } }, - "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number\n WHERE miniblocks.l1_batch_number = $1" + "query": "\n INSERT INTO contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n status,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'queued', now(), now())\n RETURNING id\n " }, - "8fe01036cac5181aabfdc06095da291c4de6b1e0f82f846c37509bb550ef544e": { + "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { "describe": { "columns": [ { - "name": "l1_address", + "name": "successful_limit!", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" + }, + { + "name": "queued_limit!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "max_block!", + "ordinal": 2, + "type_info": "Int8" } ], "nullable": [ - false + null, + null, + null ], "parameters": { "Left": [] } }, - "query": "SELECT l1_address FROM tokens WHERE well_known = false" + "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " + }, + "9c77342759fc71b12f05c2395ac36aabadab1fa64ff585d6349b8053300cf76c": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bool", + "Bytea", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, updated_at = now() WHERE number = $14 AND hash IS NULL" }, - "8fefa3194f469b0f46dc5efcb9e6ccc08159ef6a5681090cb7596877b597bc73": { + "9fccfc087388898a7da57c88c3e14eb6623f90682abf43e293def3580ea1a8dd": { 
"describe": { "columns": [ { @@ -6546,39 +7320,39 @@ "type_info": "Bytea" }, { - "name": "gas_per_pubdata_byte_in_block", + "name": "l1_gas_price", "ordinal": 30, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "rollup_last_leaf_index", + "name": "l2_fair_gas_price", "ordinal": 31, "type_info": "Int8" }, { - "name": "zkporter_is_available", + "name": "rollup_last_leaf_index", "ordinal": 32, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "bootloader_code_hash", + "name": "zkporter_is_available", "ordinal": 33, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "default_aa_code_hash", + "name": "bootloader_code_hash", "ordinal": 34, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", + "name": "default_aa_code_hash", "ordinal": 35, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", + "name": "base_fee_per_gas", "ordinal": 36, - "type_info": "Int8" + "type_info": "Numeric" }, { "name": "aux_data_hash", @@ -6601,12 +7375,12 @@ "type_info": "Bool" }, { - "name": "l1_gas_price", + "name": "gas_per_pubdata_byte_in_block", "ordinal": 41, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", + "name": "gas_per_pubdata_limit", "ordinal": 42, "type_info": "Int8" } @@ -6642,151 +7416,170 @@ true, true, true, - true, + false, + false, true, true, true, true, false, - false, true, true, true, false, - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT * FROM l1_batches\n ORDER BY number DESC\n LIMIT 1" - }, - "9008367aad7877f269b765c4d0772d0f60689fcde6987c620fe5749a259a8db7": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ + true, false ], "parameters": { "Left": [ - "Int4", - "Int8", "Int8", - "Text", - "Bytea" + "Int8" ] } }, - "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING id" + "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit FROM (SELECT l1_batches.*, row_number() OVER (ORDER BY number ASC) AS row_number FROM l1_batches WHERE eth_commit_tx_id IS NOT NULL AND l1_batches.skip_proof = TRUE AND l1_batches.number > $1 ORDER BY number LIMIT $2) inn WHERE number - row_number = $1" }, - "91db60cc4f98ebcaef1435342607da0a86fe16e20a696cb81a569772d5d5ae88": { + "9feee3fd267dc4e58185aeae7cab798c03eefa69470e4b98716615cecf6c012a": { "describe": { "columns": [ { - "name": "value", + "name": "id", "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "contract_address", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "source_code", + "ordinal": 2, + 
"type_info": "Text" + }, + { + "name": "contract_name", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "zk_compiler_version", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "compiler_version", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "optimization_used", + "ordinal": 6, + "type_info": "Bool" + }, + { + "name": "optimizer_mode", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "constructor_arguments", + "ordinal": 8, "type_info": "Bytea" + }, + { + "name": "is_system", + "ordinal": 9, + "type_info": "Bool" } ], "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + false, false ], "parameters": { "Left": [ - "Bytea", - "Int8" + "Interval" ] } }, - "query": "\n SELECT value\n FROM storage_logs\n WHERE storage_logs.hashed_key = $1 AND storage_logs.miniblock_number <= $2\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n " - }, - "928b5c1fbec2b2cfb9293cfe6312f7a0549f47a7cff4981acc0c2fda81079701": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id)\n WHERE prove_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n " }, - "95ce099fde99c57a930ed3d44f74a90d632b831360210ec7fe21b33bed1a4582": { + "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { "columns": [ { - "name": "id", + "name": "address", "ordinal": 0, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "nonce", + "name": "topic1", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "raw_tx", + "name": "topic2", "ordinal": 2, "type_info": "Bytea" }, { - "name": "contract_address", + "name": "topic3", "ordinal": 3, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "tx_type", + "name": "topic4", "ordinal": 4, - "type_info": "Text" + "type_info": "Bytea" }, { - "name": "gas_used", + "name": "value", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "created_at", + "name": "block_hash", "ordinal": 6, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "l1_batch_number?", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Int8" }, { - "name": "has_failed", + "name": "miniblock_number", "ordinal": 8, - "type_info": "Bool" + "type_info": "Int8" }, { - "name": "sent_at_block", + "name": "tx_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "confirmed_eth_tx_history_id", + "name": "tx_index_in_block", "ordinal": 10, "type_info": "Int4" }, { - "name": "predicted_gas_cost", + "name": "event_index_in_block", "ordinal": 11, - "type_info": "Int8" + "type_info": "Int4" + }, + { + "name": "event_index_in_tx", + "ordinal": 12, + "type_info": "Int4" } ], "nullable": [ 
@@ -6795,297 +7588,204 @@ false, false, false, - true, + false, + null, + null, + false, false, false, false, - true, - true, false ], "parameters": { "Left": [ - "Bytea", - "Int8", - "Text", - "Text", - "Int8" + "Bytea" ] } }, - "query": "INSERT INTO eth_txs (raw_tx, nonce, tx_type, contract_address, predicted_gas_cost, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, now(), now())\n RETURNING *" + "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, - "95e0e783794ac55ab20b30366f037c313fb0d17e93d3e6ec60667ef1b4da30d5": { + "a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Int4", + "Int8", + "Int8" ] } }, - "query": "\n UPDATE prover_jobs\n SET is_blob_cleaned=TRUE\n WHERE id = ANY($1);\n " - }, - "9bf32ea710825c1f0560a7eaa89f8f097ad196755ba82d98a729a2b0d34e1aca": { - "describe": { - "columns": [ - { - "name": "successful_limit!", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "queued_limit!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "max_block!", - "ordinal": 2, - "type_info": "Int8" - } - ], - "nullable": [ - null, - null, - null - ], - "parameters": { - "Left": [] - } - }, - "query": "\n SELECT\n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status NOT IN ('successful', 'skipped')\n ORDER BY l1_batch_number\n LIMIT 1) as \"successful_limit!\",\n \n (SELECT l1_batch_number\n FROM prover_jobs\n WHERE status <> 'queued'\n ORDER BY l1_batch_number DESC\n LIMIT 1) as \"queued_limit!\",\n\n (SELECT MAX(l1_batch_number) as \"max!\" FROM prover_jobs) as \"max_block!\"\n " + "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" }, - "9d2faf0b6f8582f0a2607ddd6e216cccfbea7ff5e99646e3a35420c4d190c5f8": { + "a42626c162a0600b9c7d22dd0d7997fa70cc95296ecc185ff9ae2e03593b07bf": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" + "Int8" ] } }, - "query": "\n UPDATE witness_inputs\n SET merkle_tree_paths=''\n WHERE l1_batch_number = ANY($1);\n " + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status='queued'\n WHERE l1_batch_number = $1\n AND status != 'successful'\n AND status != 'in_progress'\n " }, - "a2758f1cfaac42019e4b11a7fe21d62da2a83b98d997448658ab2855383d6ca4": { + "a482c481a9ffaad4735775282cf6e8d68f284884e7c6f043e9737a0d236f2e97": { "describe": { "columns": [ { - "name": "number", + "name": "tx_hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", + "name": "topic2!", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "is_finished", + "name": "topic3!", "ordinal": 2, - "type_info": "Bool" + "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "value!", "ordinal": 3, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "l2_tx_count", + "name": "l1_address!", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "l2_address!", "ordinal": 5, "type_info": "Bytea" }, { - "name": "bloom", + "name": "symbol!", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Varchar" }, { - "name": "priority_ops_onchain_data", + "name": "name!", "ordinal": 7, - "type_info": 
"ByteaArray" + "type_info": "Varchar" }, { - "name": "hash", + "name": "decimals!", "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "parent_hash", - "ordinal": 9, - "type_info": "Bytea" - }, - { - "name": "commitment", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "compressed_write_logs", - "ordinal": 11, - "type_info": "Bytea" - }, - { - "name": "compressed_contracts", - "ordinal": 12, - "type_info": "Bytea" - }, - { - "name": "eth_prove_tx_id", - "ordinal": 13, "type_info": "Int4" }, { - "name": "eth_commit_tx_id", - "ordinal": 14, - "type_info": "Int4" - }, - { - "name": "eth_execute_tx_id", - "ordinal": 15, - "type_info": "Int4" - }, - { - "name": "created_at", - "ordinal": 16, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 17, - "type_info": "Timestamp" - }, - { - "name": "merkle_root_hash", - "ordinal": 18, - "type_info": "Bytea" - }, - { - "name": "l2_to_l1_logs", - "ordinal": 19, - "type_info": "ByteaArray" - }, - { - "name": "l2_to_l1_messages", - "ordinal": 20, - "type_info": "ByteaArray" - }, - { - "name": "predicted_commit_gas_cost", - "ordinal": 21, - "type_info": "Int8" - }, - { - "name": "predicted_prove_gas_cost", - "ordinal": 22, - "type_info": "Int8" - }, - { - "name": "predicted_execute_gas_cost", - "ordinal": 23, - "type_info": "Int8" - }, - { - "name": "initial_bootloader_heap_content", - "ordinal": 24, - "type_info": "Jsonb" - }, - { - "name": "used_contract_hashes", - "ordinal": 25, - "type_info": "Jsonb" - }, - { - "name": "compressed_initial_writes", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "compressed_repeated_writes", - "ordinal": 27, - "type_info": "Bytea" - }, - { - "name": "l2_l1_compressed_messages", - "ordinal": 28, - "type_info": "Bytea" - }, - { - "name": "l2_l1_merkle_root", - "ordinal": 29, - "type_info": "Bytea" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 30, - "type_info": "Int4" - }, + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + }, + "a4a14eb42b9acca3f93c67e5760ba700c333b5e9a38c132a3060a94c988e7f13": { + "describe": { + "columns": [ { - "name": "rollup_last_leaf_index", - "ordinal": 31, - "type_info": "Int8" + "name": "hash", + "ordinal": 0, + "type_info": "Bytea" }, { - "name": "zkporter_is_available", - "ordinal": 32, - "type_info": "Bool" - }, + "name": "received_at", + "ordinal": 1, + "type_info": "Timestamp" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ + "Timestamp", + "Int8" + ] + } + }, + "query": "SELECT transactions.hash, transactions.received_at FROM transactions LEFT JOIN miniblocks ON miniblocks.number = miniblock_number WHERE received_at > $1 ORDER BY received_at ASC LIMIT $2" + }, + "a7d575d90f9bf19427ddbe342d296effb7c38bc90f213aa1cc94523930dd8f15": { + "describe": { 
+ "columns": [ { - "name": "bootloader_code_hash", - "ordinal": 33, + "name": "tx_hash", + "ordinal": 0, "type_info": "Bytea" }, { - "name": "default_aa_code_hash", - "ordinal": 34, + "name": "l1_sender!", + "ordinal": 1, "type_info": "Bytea" }, { - "name": "base_fee_per_gas", - "ordinal": 35, - "type_info": "Numeric" + "name": "topic2!", + "ordinal": 2, + "type_info": "Bytea" }, { - "name": "gas_per_pubdata_limit", - "ordinal": 36, - "type_info": "Int8" + "name": "value!", + "ordinal": 3, + "type_info": "Bytea" }, { - "name": "aux_data_hash", - "ordinal": 37, + "name": "l1_address!", + "ordinal": 4, "type_info": "Bytea" }, { - "name": "pass_through_data_hash", - "ordinal": 38, + "name": "l2_address!", + "ordinal": 5, "type_info": "Bytea" }, { - "name": "meta_parameters_hash", - "ordinal": 39, - "type_info": "Bytea" + "name": "symbol!", + "ordinal": 6, + "type_info": "Varchar" }, { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" + "name": "name!", + "ordinal": 7, + "type_info": "Varchar" }, { - "name": "l1_gas_price", - "ordinal": 41, - "type_info": "Int8" + "name": "decimals!", + "ordinal": 8, + "type_info": "Int4" }, { - "name": "l2_fair_gas_price", - "ordinal": 42, - "type_info": "Int8" + "name": "usd_price?", + "ordinal": 9, + "type_info": "Numeric" } ], "nullable": [ @@ -7097,118 +7797,151 @@ false, false, false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - true, - true, - true, - false, false, + true + ], + "parameters": { + "Left": [ + "ByteaArray", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " + }, + "a9b1a31def214f8b1441dc3ab720bd270f3991c9f1c7528256276e176d532163": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ false ], "parameters": { "Left": [ - "Float8", - "Int8" + "Bytea" ] } }, - "query": "SELECT l1_batches.* FROM l1_batches JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) JOIN eth_txs_history as commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 ORDER BY number LIMIT $2" + "query": "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1" }, - "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { + "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { "describe": { "columns": [ { - "name": "address", + "name": "count!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [ + "Bytea", + "Numeric", + "Interval", + "Interval" + ] + } + }, + "query": "\n SELECT COUNT(*) as 
\"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " + }, + "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { + "describe": { + "columns": [ + { + "name": "proof", "ordinal": 0, "type_info": "Bytea" }, { - "name": "topic1", + "name": "aggregation_result_coords", "ordinal": 1, "type_info": "Bytea" + } + ], + "nullable": [ + true, + true + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " + }, + "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int4" }, { - "name": "topic2", + "name": "nonce", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "raw_tx", "ordinal": 2, "type_info": "Bytea" }, { - "name": "topic3", + "name": "contract_address", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "topic4", + "name": "tx_type", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value", + "name": "gas_used", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "block_hash", + "name": "created_at", "ordinal": 6, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_batch_number?", + "name": "updated_at", "ordinal": 7, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "miniblock_number", + "name": "has_failed", "ordinal": 8, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "tx_hash", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "tx_index_in_block", + "name": "confirmed_eth_tx_history_id", "ordinal": 10, "type_info": "Int4" }, { - "name": "event_index_in_block", + "name": "predicted_gas_cost", "ordinal": 11, - "type_info": "Int4" - }, - { - "name": "event_index_in_tx", - "ordinal": 12, - "type_info": "Int4" + "type_info": "Int8" } ], "nullable": [ @@ -7217,116 +7950,115 @@ false, false, false, - false, - null, - null, - false, + true, false, false, false, + true, + true, false - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\",\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE tx_hash = $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " - }, - "a3d526a5a341618e9784fc81626143a3174709483a527879254ff8e28f210ac3": { - "describe": { - "columns": [], - "nullable": [], + ], "parameters": { "Left": [ - "Int4", - "Int8", "Int8" ] } }, - "query": "UPDATE l1_batches SET eth_execute_tx_id = $1, updated_at = now() WHERE number BETWEEN $2 AND $3" + "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " }, - "a3d6cbf1f4386b65338db27467087eb77479f739dc9e9e2ac004c5c0350aa99e": { + "aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { "describe": { "columns": [ { - "name": "number", + "name": 
"number!", "ordinal": 0, "type_info": "Int8" - }, + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT COALESCE(MAX(number), 0) AS \"number!\" FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL" + }, + "aacaeff95b9a2988167dde78200d7139ba99edfa30dbcd8a7a57f72efc676477": { + "describe": { + "columns": [ { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" + "name": "number", + "ordinal": 0, + "type_info": "Int8" } ], "nullable": [ - false, false ], "parameters": { - "Left": [ - "Int8", - "Int8" - ] + "Left": [] } }, - "query": "\n SELECT number, hash FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n LIMIT $2\n " + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, - "a482c481a9ffaad4735775282cf6e8d68f284884e7c6f043e9737a0d236f2e97": { + "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic2!", + "name": "eth_tx_id", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "topic3!", + "name": "tx_hash", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "value!", + "name": "created_at", "ordinal": 3, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l1_address!", + "name": "updated_at", "ordinal": 4, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_address!", + "name": "base_fee_per_gas", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "symbol!", + "name": "priority_fee_per_gas", "ordinal": 6, - "type_info": "Varchar" + "type_info": "Int8" }, { - "name": "name!", + "name": "confirmed_at", "ordinal": 7, - "type_info": "Varchar" + "type_info": "Timestamp" }, { - "name": "decimals!", + "name": "signed_raw_tx", "ordinal": 8, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "usd_price?", + "name": "sent_at_block", "ordinal": 9, - "type_info": "Numeric" + "type_info": "Int4" + }, + { + "name": "sent_at", + "ordinal": 10, + "type_info": "Timestamp" } ], "nullable": [ @@ -7337,214 +8069,184 @@ false, false, false, - false, - false, + true, + true, + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", - "Bytea" + "Int4" ] } }, - "query": "\n SELECT tx_hash, topic2 as \"topic2!\", topic3 as \"topic3!\", value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" }, - "a4eef598864b0d59bd663eb16bff3a23bcb7ac37bb6a2e702d6415b8dd99cd9f": { + "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "Bool", "Bytea", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Numeric", - "Int8", - "Int8", - "Bytea", - "Bytea" + "Int8" 
] } }, - "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count,\n timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data,\n predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost,\n initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash,\n created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now())\n " + "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " }, - "a7d575d90f9bf19427ddbe342d296effb7c38bc90f213aa1cc94523930dd8f15": { + "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { "describe": { "columns": [ { - "name": "tx_hash", + "name": "market_volume", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_sender!", + "name": "market_volume_updated_at", "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "topic2!", - "ordinal": 2, - "type_info": "Bytea" - }, - { - "name": "value!", - "ordinal": 3, - "type_info": "Bytea" - }, - { - "name": "l1_address!", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "l2_address!", - "ordinal": 5, - "type_info": "Bytea" - }, - { - "name": "symbol!", - "ordinal": 6, - "type_info": "Varchar" - }, - { - "name": "name!", - "ordinal": 7, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "usd_price?", - "ordinal": 9, - "type_info": "Numeric" + "type_info": "Timestamp" } ], "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, + true, true ], "parameters": { "Left": [ - "ByteaArray", - "Bytea", "Bytea" ] } }, - "query": "\n SELECT events.tx_hash, transactions.initiator_address as \"l1_sender!\", events.topic2 as \"topic2!\", events.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM events\n INNER JOIN tokens ON tokens.l2_address = '\\x0000000000000000000000000000000000000000'\n INNER JOIN transactions ON transactions.hash = events.tx_hash\n WHERE tx_hash = ANY($1) AND events.topic1 = $2 AND events.address = $3\n ORDER BY tx_hash, events.miniblock_number ASC, event_index_in_block ASC\n " + "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" }, - "a8d2b80d197d8168a6c1b4666e799a9d6c2e31d84986ae352715e687989f913c": { + "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" + }, + "aea4e8d1b018836973d252df943a2c1988dd5f3ffc629064b87d25af8cdb8638": { "describe": { "columns": [ { - "name": "id", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "contract_address", + "name": "l1_batch_tx_index", "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "source_code", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "contract_name", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "compiler_zksolc_version", - "ordinal": 4, - "type_info": "Text" - }, + "type_info": "Int4" + } + ], + "nullable": [ + true, + true + ], + 
"parameters": { + "Left": [ + "Bytea" + ] + } + }, + "query": "SELECT l1_batch_number, l1_batch_tx_index FROM transactions WHERE hash = $1" + }, + "af22ad34bde12b8d25eb85da9939d12b7bed6407d732b868eeaf2916568c8646": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Time", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " + }, + "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { + "describe": { + "columns": [ { - "name": "optimization_used", - "ordinal": 5, - "type_info": "Bool" - }, + "name": "l1_block_number", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" + }, + "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { + "describe": { + "columns": [ { - "name": "constructor_arguments", - "ordinal": 6, - "type_info": "Bytea" + "name": "id", + "ordinal": 0, + "type_info": "Int4" }, { - "name": "status", - "ordinal": 7, - "type_info": "Text" + "name": "eth_tx_id", + "ordinal": 1, + "type_info": "Int4" }, { - "name": "error", - "ordinal": 8, + "name": "tx_hash", + "ordinal": 2, "type_info": "Text" }, { "name": "created_at", - "ordinal": 9, + "ordinal": 3, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 10, + "ordinal": 4, "type_info": "Timestamp" }, { - "name": "compilation_errors", - "ordinal": 11, - "type_info": "Jsonb" + "name": "base_fee_per_gas", + "ordinal": 5, + "type_info": "Int8" }, { - "name": "processing_started_at", - "ordinal": 12, - "type_info": "Timestamp" + "name": "priority_fee_per_gas", + "ordinal": 6, + "type_info": "Int8" }, { - "name": "compiler_solc_version", - "ordinal": 13, - "type_info": "Text" + "name": "confirmed_at", + "ordinal": 7, + "type_info": "Timestamp" }, { - "name": "attempts", - "ordinal": 14, - "type_info": "Int4" + "name": "signed_raw_tx", + "ordinal": 8, + "type_info": "Bytea" }, { - "name": "panic_message", - "ordinal": 15, - "type_info": "Text" + "name": "sent_at_block", + "ordinal": 9, + "type_info": "Int4" }, { - "name": "is_system", - "ordinal": 16, - "type_info": "Bool" + "name": "sent_at", + "ordinal": 10, + "type_info": "Timestamp" } ], "nullable": [ @@ -7555,76 +8257,130 @@ false, false, false, - false, true, - false, - false, true, true, - false, - false, - true, - false + true ], "parameters": { "Left": [ - "Interval" + "Int4" ] } }, - "query": "UPDATE contract_verification_requests\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id FROM contract_verification_requests\n WHERE status = 'queued' OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n ORDER BY created_at\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING contract_verification_requests.*" + "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" }, - "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { + "b14997f84d11d7eea89168383195c5579eed1c57bb2b416a749e2863ae6594a5": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " + }, + 
"b479b7d3334f8d4566c294a44e2adb282fbc66a87be5c248c65211c2a8a07db0": { "describe": { "columns": [ { - "name": "count!", + "name": "number", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" } ], "nullable": [ - null + false, + false ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + } + }, + "query": "SELECT number, hash FROM miniblocks WHERE number > $1 ORDER BY number ASC LIMIT $2" + }, + "b4c576db7c762103dc6700ded458e996d2e9ef670d7b58b181dbfab02fa426ce": { + "describe": { + "columns": [], + "nullable": [], "parameters": { "Left": [ + "Bytea", "Bytea", "Numeric", - "Interval", - "Interval" + "Numeric", + "Numeric", + "Jsonb", + "Int8", + "Numeric", + "Numeric", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" ] } }, - "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " }, - "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { + "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4", + "Bytea", + "Int4", + "Text" + ] + } + }, + "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " + }, + "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { "describe": { "columns": [ { - "name": "proof", + "name": "l1_address", "ordinal": 0, "type_info": "Bytea" }, { - "name": "aggregation_result_coords", + "name": "l2_address", "ordinal": 1, "type_info": "Bytea" } ], "nullable": [ - true, - true + false, + false ], "parameters": { - "Left": [ - "Int8", - "Int8" - ] + "Left": [] } }, - "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " + "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" }, - "aa1534f03679fd2d1d9e7c1da1f94cc0e2ec5fc3a0e1ac7137147533eacf0aaf": { + "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { "describe": { "columns": [ { @@ -7703,70 +8459,63 @@ false ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] } }, - "query": "SELECT * FROM eth_txs \n WHERE id > (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history)\n ORDER BY id\n LIMIT $1\n " + "query": "SELECT * FROM eth_txs WHERE 
confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" }, - "ad11ec3e628ae6c64ac160d8dd689b2f64033f620e17a31469788b3ce4968ad3": { + "b79f02c8663c6b99d0aa46b430de32103afa0333e8293cf8661cfc1c3f9fc12e": { "describe": { "columns": [ { "name": "id", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "eth_tx_id", + "name": "contract_address", "ordinal": 1, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "tx_hash", + "name": "source_code", "ordinal": 2, "type_info": "Text" }, { - "name": "created_at", + "name": "contract_name", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "updated_at", + "name": "zk_compiler_version", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "base_fee_per_gas", + "name": "compiler_version", "ordinal": 5, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "priority_fee_per_gas", + "name": "optimization_used", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "confirmed_at", + "name": "optimizer_mode", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Text" }, { - "name": "signed_raw_tx", + "name": "constructor_arguments", "ordinal": 8, "type_info": "Bytea" }, { - "name": "sent_at_block", + "name": "is_system", "ordinal": 9, - "type_info": "Int4" - }, - { - "name": "sent_at", - "ordinal": 10, - "type_info": "Timestamp" + "type_info": "Bool" } ], "nullable": [ @@ -7777,299 +8526,360 @@ false, false, false, - true, - true, - true, - true + true, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,\n optimizer_mode, constructor_arguments, is_system\n FROM contract_verification_requests\n WHERE status = 'successful'\n ORDER BY id" + }, + "b7ab3aeee71e87c7469428ec411b410d81282ff6fed63fe5cda0e81a330d2ac5": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int2" + ] + } + }, + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + }, + "b7d3b30bff2ed9aabcdaed89ebfd1f0303b70c6d5483ff9183475bb232a04f21": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false ], "parameters": { "Left": [ - "Int4" + "Interval", + "Int2" ] } }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC LIMIT 1" + "query": "\n UPDATE witness_inputs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval 
AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " }, - "ad4f74aa6f131df0243f4fa500ade1b98aa335bd71ed417b02361e2c697e60f8": { + "be824de76050461afe29dfd229e524bdf113eab3ca24208782c200531db1c940": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ - "Bytea", - "Int8" + "Int8", + "Int2", + "Int2", + "Int4" ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " + "query": "\n SELECT id from prover_jobs_fri\n WHERE l1_batch_number = $1\n AND circuit_id = $2\n AND aggregation_round = $3\n AND depth = $4\n AND status = 'successful'\n ORDER BY sequence_number ASC;\n " }, - "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { + "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { "describe": { "columns": [ { - "name": "market_volume", + "name": "bytecode", "ordinal": 0, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "market_volume_updated_at", + "name": "bytecode_hash", "ordinal": 1, - "type_info": "Timestamp" + "type_info": "Bytea" } ], "nullable": [ - true, - true + false, + false ], "parameters": { "Left": [ - "Bytea" + "ByteaArray" ] } }, - "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" + "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" }, - "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { + "c115b25ea0d6b33331d1737cbc4e37ed44c466782d25f3d9c5519dd886f103ee": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8" + "TextArray", + "Text" ] } }, - "query": "DELETE FROM factory_deps WHERE miniblock_number > $1" - }, - "af75db6b7e42b73ce62b28a7281e1bfa181ee0c80a85d7d8078831db5dcdb699": { - "describe": { - "columns": [ - { - "name": "l1_block_number", - "ordinal": 0, - "type_info": "Int4" - } - ], - "nullable": [ - true - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_block_number FROM transactions\n WHERE priority_op_id IS NOT NULL\n ORDER BY priority_op_id DESC\n LIMIT 1" + "query": "\n INSERT INTO compiler_versions (version, compiler, created_at, updated_at)\n SELECT u.version, $2, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)" }, - "b1478907214ad20dddd4f3846fba4b0ddf1fff63ddb3b95c8999635e77c8b863": { + "c1a4eb25f5493fbcc1b6d61bd7f2e74797a83b7eb0900ba16f3c3ca38f824563": { "describe": { "columns": [ { - "name": "id", + "name": "number", "ordinal": 0, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "eth_tx_id", + "name": "timestamp", "ordinal": 1, - "type_info": "Int4" + "type_info": "Int8" }, { - "name": "tx_hash", + "name": "is_finished", "ordinal": 2, - "type_info": "Text" + "type_info": "Bool" }, { - "name": "created_at", + "name": "l1_tx_count", "ordinal": 3, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "updated_at", + "name": "l2_tx_count", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "base_fee_per_gas", + "name": "fee_account_address", "ordinal": 5, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "priority_fee_per_gas", + "name": "bloom", "ordinal": 6, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "confirmed_at", + "name": "priority_ops_onchain_data", "ordinal": 7, - "type_info": "Timestamp" + "type_info": 
"ByteaArray" }, { - "name": "signed_raw_tx", + "name": "hash", "ordinal": 8, "type_info": "Bytea" }, { - "name": "sent_at_block", + "name": "parent_hash", "ordinal": 9, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "sent_at", + "name": "commitment", "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "compressed_write_logs", + "ordinal": 11, + "type_info": "Bytea" + }, + { + "name": "compressed_contracts", + "ordinal": 12, + "type_info": "Bytea" + }, + { + "name": "eth_prove_tx_id", + "ordinal": 13, + "type_info": "Int4" + }, + { + "name": "eth_commit_tx_id", + "ordinal": 14, + "type_info": "Int4" + }, + { + "name": "eth_execute_tx_id", + "ordinal": 15, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 16, "type_info": "Timestamp" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true - ], - "parameters": { - "Left": [ - "Int4" - ] - } - }, - "query": "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC" - }, - "b4c576db7c762103dc6700ded458e996d2e9ef670d7b58b181dbfab02fa426ce": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Jsonb", - "Int8", - "Numeric", - "Numeric", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" - ] - } - }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n\n paymaster,\n paymaster_input,\n tx_format,\n\n l1_tx_mint,\n l1_tx_refund_recipient,\n\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, TRUE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12,\n $13, $14, $15, $16, $17, $18, now(), now()\n )\n ON CONFLICT (hash) DO NOTHING\n " - }, - "b4cd15d430b423cd5bad80199abf0f67c698ca469e55557f20d5c7460ed40b0d": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Bytea", - "Int4", - "Text" - ] - } - }, - "query": "\n INSERT INTO prover_jobs (l1_batch_number, circuit_type, sequence_number, prover_input, aggregation_round, circuit_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, $3, $4, $5, $6, 'queued', now(), now())\n ON CONFLICT(l1_batch_number, aggregation_round, sequence_number) DO NOTHING\n " - }, - "b4da918ee3b36b56d95c8834edebe65eb48ebb8270fa1e6ccf73ad354fd71134": { - "describe": { - "columns": [ + }, + { + "name": "updated_at", + "ordinal": 17, + "type_info": "Timestamp" + }, + { + "name": "merkle_root_hash", + "ordinal": 18, + "type_info": "Bytea" + }, + { + "name": "l2_to_l1_logs", + "ordinal": 19, + "type_info": "ByteaArray" + }, + { + "name": "l2_to_l1_messages", + "ordinal": 20, + "type_info": "ByteaArray" + }, + { + "name": "predicted_commit_gas_cost", + "ordinal": 21, + "type_info": "Int8" + }, + { + "name": "predicted_prove_gas_cost", + "ordinal": 22, + "type_info": "Int8" + }, + { + "name": "predicted_execute_gas_cost", + "ordinal": 23, + "type_info": "Int8" + }, + { + "name": "initial_bootloader_heap_content", + "ordinal": 24, + "type_info": "Jsonb" + }, { - "name": "l1_address", - "ordinal": 0, + "name": "used_contract_hashes", + "ordinal": 25, + "type_info": "Jsonb" + }, + { + "name": "compressed_initial_writes", + "ordinal": 26, "type_info": "Bytea" }, { - "name": "l2_address", - 
"ordinal": 1, + "name": "compressed_repeated_writes", + "ordinal": 27, "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT l1_address, l2_address FROM tokens WHERE well_known = true" - }, - "b6c8e0827b2389a14433c031332962495311562ae9652ae7e9409a4bf48dc55b": { - "describe": { - "columns": [ + }, { - "name": "id", - "ordinal": 0, + "name": "l2_l1_compressed_messages", + "ordinal": 28, + "type_info": "Bytea" + }, + { + "name": "l2_l1_merkle_root", + "ordinal": 29, + "type_info": "Bytea" + }, + { + "name": "gas_per_pubdata_byte_in_block", + "ordinal": 30, "type_info": "Int4" }, { - "name": "nonce", - "ordinal": 1, + "name": "rollup_last_leaf_index", + "ordinal": 31, "type_info": "Int8" }, { - "name": "raw_tx", - "ordinal": 2, + "name": "zkporter_is_available", + "ordinal": 32, + "type_info": "Bool" + }, + { + "name": "bootloader_code_hash", + "ordinal": 33, "type_info": "Bytea" }, { - "name": "contract_address", - "ordinal": 3, - "type_info": "Text" + "name": "default_aa_code_hash", + "ordinal": 34, + "type_info": "Bytea" }, { - "name": "tx_type", - "ordinal": 4, - "type_info": "Text" + "name": "base_fee_per_gas", + "ordinal": 35, + "type_info": "Numeric" }, { - "name": "gas_used", - "ordinal": 5, + "name": "gas_per_pubdata_limit", + "ordinal": 36, "type_info": "Int8" }, { - "name": "created_at", - "ordinal": 6, - "type_info": "Timestamp" + "name": "aux_data_hash", + "ordinal": 37, + "type_info": "Bytea" }, { - "name": "updated_at", - "ordinal": 7, - "type_info": "Timestamp" + "name": "pass_through_data_hash", + "ordinal": 38, + "type_info": "Bytea" }, { - "name": "has_failed", - "ordinal": 8, - "type_info": "Bool" + "name": "meta_parameters_hash", + "ordinal": 39, + "type_info": "Bytea" }, { - "name": "sent_at_block", - "ordinal": 9, - "type_info": "Int4" + "name": "skip_proof", + "ordinal": 40, + "type_info": "Bool" }, { - "name": "confirmed_eth_tx_history_id", - "ordinal": 10, - "type_info": "Int4" + "name": "l1_gas_price", + "ordinal": 41, + "type_info": "Int8" }, { - "name": "predicted_gas_cost", - "ordinal": 11, + "name": "l2_fair_gas_price", + "ordinal": 42, "type_info": "Int8" } ], @@ -8079,149 +8889,65 @@ false, false, false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, true, false, false, false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, true, true, - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL \n AND id <= (SELECT COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history WHERE sent_at_block IS NOT NULL)\n ORDER BY id" - }, - "bb3ae24e27a04047af2d6ebc145e86619d29ec89bb2abe39244f5669e82c9571": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n UPDATE l1_batches\n SET hash = $1\n WHERE number = $2\n " - }, - "bd4898ee283a312cb995853686a1f5252e73b22efea3cf9f158c4476c9639b32": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "ByteaArray" - ] - } - }, - "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at)\n SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now()\n FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[])\n AS u(hashed_key, address, key, value, tx_hash)\n ON 
CONFLICT (hashed_key)\n DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()\n " - }, - "bef58e581dd0b658350dcdc15ebf7cf350cf088b60c916a15889e31ee7534907": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode_hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ false, - false - ], - "parameters": { - "Left": [ - "ByteaArray" - ] - } - }, - "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" - }, - "c0532f9e7a6130426acb032f391f6dae7ff22914f0045673c42c1ee84ca36490": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\"\n FROM (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) sl\n WHERE sl.value != $2\n " - }, - "c1ed4c80984db514dd264a9bc19bdaee29b6f5c291a9d503d9896c41b316cca5": { - "describe": { - "columns": [ - { - "name": "nonce!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - true - ], - "parameters": { - "Left": [ - "Bytea", - "Int8" - ] - } - }, - "query": "\n SELECT nonce as \"nonce!\" FROM transactions\n WHERE initiator_address = $1 AND nonce >= $2\n AND is_priority = FALSE\n AND (miniblock_number IS NOT NULL OR error IS NULL)\n ORDER BY nonce\n " - }, - "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray" - ] - } - }, - "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " - }, - "c2f6f7fa37b303748f47ff2de01227e7afbc9ff041bc1428743d91300f5f5caf": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - true + false, + true, + true, + true, + false, + false, + false ], "parameters": { "Left": [ - "Int8" + "Int4" + ] + } + }, + "query": "SELECT * FROM l1_batches WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1" + }, + "c2cf96a9eb6893c5ba7d9e5418d9f24084ccd87980cb6ee05de1b3bde5c654bd": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray" ] } }, - "query": "\n SELECT l1_batch_number FROM miniblocks\n WHERE number = $1\n " + "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " }, "c321d1210799dfd29e54f18f3a3698e9bf288850f2dbd782e817d1cfd9165b16": { "describe": { @@ -8339,6 +9065,57 @@ }, "query": "\n UPDATE prover_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs\n WHERE circuit_type = ANY($1)\n AND status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs.*\n " }, + "c49a6925e9462cc85a6e1cc850f2e147e0a5d990efed56f27792698e6cf9ff0c": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": 
"attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int2" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING l1_batch_number, status, attempts\n " + }, + "c604ee1dd86ac154d67ddb339da5f65ca849887d6a1068623e874f9df00cfdd1": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "ByteaArray", + "Int4Array", + "VarcharArray", + "JsonbArray", + "Int8Array", + "NumericArray" + ] + } + }, + "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool=FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n effective_gas_price = data_table.effective_gas_price,\n updated_at = now()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::integer[]) AS index_in_block,\n UNNEST($4::varchar[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::bigint[]) as refunded_gas,\n UNNEST($7::numeric[]) as effective_gas_price\n ) AS data_table\n WHERE transactions.hash = data_table.hash\n " + }, "c6109267f85f38edcd53f361cf2654f43fa45928e39324cfab8389453b4e7031": { "describe": { "columns": [ @@ -8444,699 +9221,400 @@ }, "query": "SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE" }, - "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { + "c6cdc9ef18fe20ef530b653c0c24c674dd74aef3701bfb5c6db23d649115f1d4": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8Array" - ] - } - }, - "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - } - }, - "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " - }, - "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "merkle_tree_paths", - "ordinal": 1, - "type_info": "Bytea" - }, - { - "name": "created_at", - "ordinal": 2, - "type_info": "Timestamp" - }, - { - "name": "updated_at", - "ordinal": 3, - "type_info": "Timestamp" - }, - { - "name": "status", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "time_taken", - "ordinal": 5, - "type_info": "Time" - }, - { - "name": "processing_started_at", - "ordinal": 6, - "type_info": "Timestamp" - }, - { - "name": "error", - "ordinal": 7, - "type_info": "Varchar" - }, - { - "name": "attempts", - "ordinal": 8, - "type_info": "Int4" - }, - { - "name": "merkel_tree_paths_blob_url", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "is_blob_cleaned", - 
"ordinal": 10, - "type_info": "Bool" - } - ], - "nullable": [ - false, - true, - false, - false, - false, - false, - true, - true, - false, - true, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int4", + "Time", "Int8" ] } }, - "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " + "query": "\n UPDATE witness_inputs_fri\n SET status = 'successful', updated_at = now(), time_taken = $1\n WHERE l1_batch_number = $2\n " }, - "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { + "c766f2ee9e3054ba337873ba5ebb26d4f1a43691664372152e5eb782391f9f68": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8", - "Int4", - "Int4" - ] - } - }, - "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" - }, - "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { - "describe": { - "columns": [ - { - "name": "usd_price", - "ordinal": 0, - "type_info": "Numeric" - }, - { - "name": "usd_price_updated_at", - "ordinal": 1, - "type_info": "Timestamp" - } - ], - "nullable": [ - true, - true - ], - "parameters": { - "Left": [ - "Bytea" + "Int8Array" ] } }, - "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" - }, - "c9eefe59225b10d90b67ab92a8f9e3bad92ec02f8dfc2719903149ab9f82fe1c": { - "describe": { - "columns": [ - { - "name": "hash", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "is_priority", - "ordinal": 1, - "type_info": "Bool" - }, - { - "name": "full_fee", - "ordinal": 2, - "type_info": "Numeric" - }, - { - "name": "layer_2_tip_fee", - "ordinal": 3, - "type_info": "Numeric" - }, - { - "name": "initiator_address", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "nonce", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "signature", - "ordinal": 6, - "type_info": "Bytea" - }, - { - "name": "input", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "data", - "ordinal": 8, - "type_info": "Jsonb" - }, - { - "name": "received_at", - "ordinal": 9, - "type_info": "Timestamp" - }, + "query": "\n UPDATE node_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " + }, + "c8125b30eb64eebfa4500dc623972bf8771a83b218bd18a51e633d4cf4bf8eb3": { + "describe": { + "columns": [ { - "name": "priority_op_id", - "ordinal": 10, - "type_info": "Int8" - }, + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + } + }, + "query": "\n SELECT bytecode FROM (\n SELECT * FROM storage_logs\n WHERE\n storage_logs.hashed_key = $1 AND\n storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE value != $3\n " + }, + "c81a1ff168b3a1e94489fb66995b0978c4c6aac92a731144cc22fcc1f4369ba9": { + "describe": { + "columns": [ { "name": "l1_batch_number", - "ordinal": 11, + "ordinal": 0, "type_info": "Int8" }, { - "name": "index_in_block", - 
"ordinal": 12, - "type_info": "Int4" - }, - { - "name": "error", - "ordinal": 13, - "type_info": "Varchar" - }, - { - "name": "gas_limit", - "ordinal": 14, - "type_info": "Numeric" - }, - { - "name": "gas_per_storage_limit", - "ordinal": 15, - "type_info": "Numeric" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 16, - "type_info": "Numeric" - }, - { - "name": "tx_format", - "ordinal": 17, - "type_info": "Int4" + "name": "merkle_tree_paths", + "ordinal": 1, + "type_info": "Bytea" }, { "name": "created_at", - "ordinal": 18, + "ordinal": 2, "type_info": "Timestamp" }, { "name": "updated_at", - "ordinal": 19, + "ordinal": 3, "type_info": "Timestamp" }, { - "name": "execution_info", - "ordinal": 20, - "type_info": "Jsonb" - }, - { - "name": "contract_address", - "ordinal": 21, - "type_info": "Bytea" - }, - { - "name": "in_mempool", - "ordinal": 22, - "type_info": "Bool" - }, - { - "name": "l1_block_number", - "ordinal": 23, - "type_info": "Int4" - }, - { - "name": "value", - "ordinal": 24, - "type_info": "Numeric" - }, - { - "name": "paymaster", - "ordinal": 25, - "type_info": "Bytea" - }, - { - "name": "paymaster_input", - "ordinal": 26, - "type_info": "Bytea" - }, - { - "name": "max_fee_per_gas", - "ordinal": 27, - "type_info": "Numeric" + "name": "status", + "ordinal": 4, + "type_info": "Text" }, { - "name": "max_priority_fee_per_gas", - "ordinal": 28, - "type_info": "Numeric" + "name": "time_taken", + "ordinal": 5, + "type_info": "Time" }, { - "name": "effective_gas_price", - "ordinal": 29, - "type_info": "Numeric" + "name": "processing_started_at", + "ordinal": 6, + "type_info": "Timestamp" }, { - "name": "miniblock_number", - "ordinal": 30, - "type_info": "Int8" + "name": "error", + "ordinal": 7, + "type_info": "Varchar" }, { - "name": "l1_batch_tx_index", - "ordinal": 31, + "name": "attempts", + "ordinal": 8, "type_info": "Int4" }, { - "name": "refunded_gas", - "ordinal": 32, - "type_info": "Int8" - }, - { - "name": "l1_tx_mint", - "ordinal": 33, - "type_info": "Numeric" - }, - { - "name": "l1_tx_refund_recipient", - "ordinal": 34, - "type_info": "Bytea" - }, - { - "name": "miniblock_timestamp?", - "ordinal": 35, - "type_info": "Int8" - }, - { - "name": "block_hash?", - "ordinal": 36, - "type_info": "Bytea" - }, - { - "name": "eth_commit_tx_hash?", - "ordinal": 37, - "type_info": "Text" - }, - { - "name": "eth_prove_tx_hash?", - "ordinal": 38, + "name": "merkel_tree_paths_blob_url", + "ordinal": 9, "type_info": "Text" }, { - "name": "eth_execute_tx_hash?", - "ordinal": 39, - "type_info": "Text" + "name": "is_blob_cleaned", + "ordinal": 10, + "type_info": "Bool" } ], "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, false, true, false, - true, false, false, false, true, true, - true, - true, - true, false, true, - true, - false, - false, - false, - false, false ], "parameters": { "Left": [ - "Bytea" + "Interval", + "Int4", + "Int8" ] } }, - "query": "\n SELECT transactions.*,\n miniblocks.timestamp as \"miniblock_timestamp?\",\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = 
commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " - }, - "ca3a65591d2d14e6b597389ee47594f403b5212d79267279c957cbc64d44dc7a": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id)\n WHERE commit_tx.confirmed_at IS NOT NULL\n ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " }, - "ca8fa3521dab5ee985a837572e8625bd5b26bf79f58950698218b28110c29d1f": { + "c849561f88c775f2cce4d59387916793ba1623a8a714b415375477e090d86bd3": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Text", - "Int4", + "Int8", "Int4", - "Int2", - "Text", - "Text", - "Int2" + "Int4" ] } }, - "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" + "query": "UPDATE eth_txs\n SET gas_used = $1, confirmed_eth_tx_history_id = $2\n WHERE id = $3" }, - "cbe9445b28efc540d4a01b4c8f1e62017e9854b2d01973c55b27603a8a81bbdd": { + "c891770305cb3aba4021738e60567d977eac54435c871b5178de7c3c96d2f721": { "describe": { "columns": [ { - "name": "value", + "name": "usd_price", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Numeric" + }, + { + "name": "usd_price_updated_at", + "ordinal": 1, + "type_info": "Timestamp" } ], "nullable": [ - false + true, + true ], "parameters": { "Left": [ - "Bytea", - "Int8" + "Bytea" + ] + } + }, + "query": "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1" + }, + "c92a84c15a8641f73417a03de99a0fb7e07fd0da7b376e65b3ed61209e55a5fa": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8Array" ] } }, - "query": "select value from storage_logs where hashed_key = $1 and miniblock_number <= $2 order by miniblock_number desc, operation_number desc limit 1" + "query": "UPDATE witness_inputs SET is_blob_cleaned = TRUE WHERE l1_batch_number = ANY($1)" }, - "ce12a389d218de2071752e8f67b9ad3132777c8a8737009be283e1bedef6dad5": { + "c9eefe59225b10d90b67ab92a8f9e3bad92ec02f8dfc2719903149ab9f82fe1c": { "describe": { "columns": [ { - "name": "number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "timestamp", 
+ "name": "is_priority", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "is_finished", + "name": "full_fee", "ordinal": 2, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "l1_tx_count", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "l2_tx_count", + "name": "initiator_address", "ordinal": 4, - "type_info": "Int4" + "type_info": "Bytea" }, { - "name": "fee_account_address", + "name": "nonce", "ordinal": 5, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "bloom", + "name": "signature", "ordinal": 6, "type_info": "Bytea" }, { - "name": "priority_ops_onchain_data", + "name": "input", "ordinal": 7, - "type_info": "ByteaArray" + "type_info": "Bytea" }, { - "name": "hash", + "name": "data", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Jsonb" }, { - "name": "parent_hash", + "name": "received_at", "ordinal": 9, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "commitment", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_write_logs", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "compressed_contracts", + "name": "index_in_block", "ordinal": 12, - "type_info": "Bytea" + "type_info": "Int4" }, { - "name": "eth_prove_tx_id", + "name": "error", "ordinal": 13, - "type_info": "Int4" + "type_info": "Varchar" }, { - "name": "eth_commit_tx_id", + "name": "gas_limit", "ordinal": 14, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "eth_execute_tx_id", + "name": "gas_per_storage_limit", "ordinal": 15, - "type_info": "Int4" + "type_info": "Numeric" }, { - "name": "created_at", + "name": "gas_per_pubdata_limit", "ordinal": 16, - "type_info": "Timestamp" + "type_info": "Numeric" }, { - "name": "updated_at", + "name": "tx_format", "ordinal": 17, - "type_info": "Timestamp" + "type_info": "Int4" }, { - "name": "merkle_root_hash", + "name": "created_at", "ordinal": 18, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_logs", + "name": "updated_at", "ordinal": 19, - "type_info": "ByteaArray" + "type_info": "Timestamp" }, { - "name": "l2_to_l1_messages", + "name": "execution_info", "ordinal": 20, - "type_info": "ByteaArray" + "type_info": "Jsonb" }, { - "name": "predicted_commit_gas_cost", + "name": "contract_address", "ordinal": 21, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "predicted_prove_gas_cost", + "name": "in_mempool", "ordinal": 22, - "type_info": "Int8" + "type_info": "Bool" }, { - "name": "predicted_execute_gas_cost", + "name": "l1_block_number", "ordinal": 23, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "initial_bootloader_heap_content", + "name": "value", "ordinal": 24, - "type_info": "Jsonb" + "type_info": "Numeric" }, { - "name": "used_contract_hashes", + "name": "paymaster", "ordinal": 25, - "type_info": "Jsonb" + "type_info": "Bytea" }, { - "name": "compressed_initial_writes", + "name": "paymaster_input", "ordinal": 26, "type_info": "Bytea" }, { - "name": "compressed_repeated_writes", + "name": "max_fee_per_gas", "ordinal": 27, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_compressed_messages", + "name": "max_priority_fee_per_gas", "ordinal": 28, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l2_l1_merkle_root", + "name": "effective_gas_price", "ordinal": 29, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "l1_gas_price", + 
"name": "miniblock_number", "ordinal": 30, "type_info": "Int8" }, { - "name": "l2_fair_gas_price", + "name": "l1_batch_tx_index", "ordinal": 31, - "type_info": "Int8" + "type_info": "Int4" }, { - "name": "rollup_last_leaf_index", + "name": "refunded_gas", "ordinal": 32, "type_info": "Int8" }, { - "name": "zkporter_is_available", + "name": "l1_tx_mint", "ordinal": 33, - "type_info": "Bool" + "type_info": "Numeric" }, { - "name": "bootloader_code_hash", + "name": "l1_tx_refund_recipient", "ordinal": 34, "type_info": "Bytea" }, { - "name": "default_aa_code_hash", + "name": "miniblock_timestamp?", "ordinal": 35, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "base_fee_per_gas", + "name": "block_hash?", "ordinal": 36, - "type_info": "Numeric" + "type_info": "Bytea" }, { - "name": "aux_data_hash", + "name": "eth_commit_tx_hash?", "ordinal": 37, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "pass_through_data_hash", + "name": "eth_prove_tx_hash?", "ordinal": 38, - "type_info": "Bytea" + "type_info": "Text" }, { - "name": "meta_parameters_hash", + "name": "eth_execute_tx_hash?", "ordinal": 39, - "type_info": "Bytea" - }, - { - "name": "skip_proof", - "ordinal": 40, - "type_info": "Bool" - }, - { - "name": "gas_per_pubdata_byte_in_block", - "ordinal": 41, - "type_info": "Int4" - }, - { - "name": "gas_per_pubdata_limit", - "ordinal": 42, - "type_info": "Int8" + "type_info": "Text" } ], "nullable": [ false, false, + true, + true, false, - false, - false, - false, + true, + true, + true, false, false, true, @@ -9149,11 +9627,10 @@ true, false, false, - true, - false, - false, false, + true, false, + true, false, false, false, @@ -9161,101 +9638,113 @@ true, true, true, - false, - false, - true, - true, - true, true, false, true, true, - true, false, - true, + false, + false, + false, false ], "parameters": { "Left": [ - "Int8", - "Int8" + "Bytea" ] } }, - "query": "\n SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit\n FROM\n (SELECT l1_batches.*, row_number() over (order by number ASC) as row_number\n FROM l1_batches\n WHERE eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY number LIMIT $2) inn\n WHERE number - row_number = $1\n " + "query": "\n SELECT transactions.*,\n miniblocks.timestamp as \"miniblock_timestamp?\",\n miniblocks.hash as \"block_hash?\",\n commit_tx.tx_hash as \"eth_commit_tx_hash?\",\n prove_tx.tx_hash as \"eth_prove_tx_hash?\",\n execute_tx.tx_hash as \"eth_execute_tx_hash?\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id 
AND commit_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)\n LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)\n WHERE transactions.hash = $1\n " }, - "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { + "ca8fa3521dab5ee985a837572e8625bd5b26bf79f58950698218b28110c29d1f": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Bytea", - "Int8", - "Bytea" + "Text", + "Int4", + "Int4", + "Int2", + "Text", + "Text", + "Int2" ] } }, - "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " + "query": "\n INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at)\n VALUES (cast($1::text as inet), $2, $3, $3, 'available', $4, $5, $6, $7, now(), now())\n ON CONFLICT(instance_host, instance_port, region, zone)\n DO UPDATE SET instance_status='available', queue_capacity=$3, queue_free_slots=$3, specialized_prover_group_id=$4, region=$5, zone=$6, num_gpu=$7, updated_at=now()" }, - "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { + "cba131abb2965f23c392e12b7630295cb8fc4c56775f16c71e65560f74237c94": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "Int8" + "Int8", + "Int4", + "Int4", + "Int8", + "Bool", + "Bytea", + "ByteaArray", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Numeric", + "Int8", + "Int8", + "Bytea", + "Bytea" ] } }, - "query": "DELETE FROM miniblocks WHERE number > $1" + "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now())" }, - "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { + "ce3666b149f7fc62a68139a8efb83ed149c7deace17b8968817941763e45a147": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ + "Bytea", "Int8", - "Text", - "Jsonb", - "Text" + "Bytea" ] } }, - "query": "\n UPDATE contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " + "query": "\n DELETE FROM tokens \n WHERE l2_address IN\n (\n SELECT substring(key, 12, 20) FROM storage_logs \n WHERE storage_logs.address = $1 AND miniblock_number > $2 AND NOT EXISTS (\n SELECT 1 FROM storage_logs as s\n WHERE\n s.hashed_key = storage_logs.hashed_key AND\n (s.miniblock_number, s.operation_number) >= (storage_logs.miniblock_number, storage_logs.operation_number) AND\n s.value = $3\n )\n )\n " }, - 
"d0571a05a9f65e71b3ab478dc7217c3644024ed0d6ae6616c331a7737759c86c": { + "cea77fbe02853a7a9b1f7b5ddf2957cb23212ae5ef0f889834d796c35b583542": { "describe": { - "columns": [ - { - "name": "merkle_root_hash", - "ordinal": 0, - "type_info": "Bytea" - } - ], - "nullable": [ - true - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ "Int8" ] } }, - "query": "SELECT merkle_root_hash FROM l1_batches WHERE number = $1" + "query": "DELETE FROM miniblocks WHERE number > $1" }, - "d0770d2d0cc0cec5cf5c2e90912b697f19adbdf5cb6e734c3bddd06ad96e83e9": { + "cf9a49dd3ef67b3515e411fd0daadd667af9a4451390b3ef47fe9f902ee9f4e2": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "TextArray" + "Int8", + "Text", + "Jsonb", + "Text" ] } }, - "query": "\n INSERT INTO contract_verification_solc_versions (version, created_at, updated_at)\n SELECT u.version, now(), now()\n FROM UNNEST($1::text[])\n AS u(version)\n " + "query": "\n UPDATE contract_verification_requests\n SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4\n WHERE id = $1\n " }, "d0ff67e7c59684a0e4409726544cf850dbdbb36d038ebbc6a1c5bf0e76b0358c": { "describe": { @@ -9462,27 +9951,7 @@ }, "query": "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1" }, - "d9b5fe50f1669cd648badb6d1ffe3dfa4fd263d9e3f946550bc8551815627ba5": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT l1_batch_number FROM witness_inputs\n WHERE length(merkle_tree_paths) <> 0\n ORDER BY l1_batch_number DESC\n LIMIT $1;\n " - }, - "dbf9a2be8cdd0a8ad95f049134d33ae0c4ed4204e4d8f6e5f3244bea4830f67e": { + "d8e0bb1a349523077356be101808340eab078979390af7d26c71489b5f303d1b": { "describe": { "columns": [], "nullable": [], @@ -9492,20 +9961,20 @@ ] } }, - "query": "\n UPDATE l1_batches\n SET skip_proof = TRUE WHERE number = $1\n " + "query": "UPDATE l1_batches SET skip_proof = TRUE WHERE number = $1" }, - "dbfb1709a68fccf341320f7cf1b757378ec462d63d17672f82a8d9f95797136d": { + "da01d59119023c822cffa5dc226e82b2abd4cbd46d3856d7db16289868a27fa1": { "describe": { "columns": [ { - "name": "hash", + "name": "hashed_key", "ordinal": 0, "type_info": "Bytea" }, { - "name": "received_at", + "name": "l1_batch_number", "ordinal": 1, - "type_info": "Timestamp" + "type_info": "Int8" } ], "nullable": [ @@ -9514,12 +9983,11 @@ ], "parameters": { "Left": [ - "Timestamp", - "Int8" + "ByteaArray" ] } }, - "query": "\n SELECT transactions.hash, transactions.received_at\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = miniblock_number\n WHERE received_at > $1\n ORDER BY received_at ASC\n LIMIT $2\n " + "query": "SELECT hashed_key, l1_batch_number FROM initial_writes WHERE hashed_key = ANY($1::bytea[])" }, "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572": { "describe": { @@ -9539,23 +10007,23 @@ }, "query": "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" }, - "dd10ebfbf5db4d2ac44b03be3acf494ea180f59685d8fc156af481e8265079c2": { + "dc751a25528a272bac17416f782fce3d0aee44b1ae25be0220718b356fda02e8": { "describe": { "columns": [ { - "name": "hash", + "name": "id", "ordinal": 0, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "number", + "name": "status", "ordinal": 1, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "timestamp", + "name": "attempts", "ordinal": 2, - "type_info": "Int8" 
+ "type_info": "Int2" } ], "nullable": [ @@ -9563,13 +10031,34 @@ false, false ], + "parameters": { + "Left": [ + "Interval", + "Int2" + ] + } + }, + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + }, + "dd330bc075a163974c59ec55ecfddd769d05801963b3e0e840e7f11e7bc6d3e9": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], "parameters": { "Left": [ "Int8" ] } }, - "query": "\n SELECT\n hash,\n number,\n timestamp\n FROM miniblocks\n WHERE number > $1\n ORDER BY number ASC\n " + "query": "SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 ORDER BY l1_batch_number DESC LIMIT $1" }, "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { "describe": { @@ -9611,6 +10100,76 @@ }, "query": "\n SELECT circuit_type, result from prover_jobs\n WHERE l1_batch_number = $1 AND status = 'successful' AND aggregation_round = $2\n ORDER BY sequence_number ASC;\n " }, + "deaf3789ac968e299fe0e5a7f1c72494af8ecd664da9c901ec9c0c5e7c29bb65": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "ByteaArray" + ] + } + }, + "query": "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at) SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now() FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[]) AS u(hashed_key, address, key, value, tx_hash) ON CONFLICT (hashed_key) DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()" + }, + "e1235572a080ee86724da2ad5f528e27e6442ad47abd22e04af8efec2c59432b": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" + }, + { + "name": "sequence_number", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE id = (\n SELECT id\n FROM prover_jobs_fri\n WHERE status = 'queued'\n ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + }, "e14338281eb639856f1c7a8ba6b60fe3914d3f30d0b55cea8fb287209892df03": { "describe": { "columns": [ @@ -9663,23 +10222,51 @@ }, "query": "\n WITH sl AS (\n SELECT * FROM storage_logs\n WHERE storage_logs.hashed_key = $1\n ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC\n LIMIT 1\n )\n 
SELECT\n sl.key as \"key_address\",\n fd.bytecode,\n txs.initiator_address as \"creator_address?\",\n txs.hash as \"creator_tx_hash?\",\n sl.miniblock_number as \"created_in_block_number\",\n c.verification_info\n FROM sl\n JOIN factory_deps fd ON fd.bytecode_hash = sl.value\n LEFT JOIN transactions txs ON txs.hash = sl.tx_hash\n LEFT JOIN contracts_verification_info c ON c.address = $2\n WHERE sl.value != $3\n " }, - "e199251d38cb1f18993863f2e7920f21f7867ae1b48ffc905919de7bd98491de": { + "e1879cce18ad449d58f02254aa9ae4b115152484187161647d012df798985365": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + } + }, + "query": "\n INSERT INTO scheduler_witness_jobs_fri\n (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at)\n VALUES ($1, $2, 'waiting_for_proofs', now(), now())\n ON CONFLICT(l1_batch_number)\n DO UPDATE SET updated_at=now()\n " + }, + "e1ad7a51afef6bd7a95df3294f64b7b1bdc4c4fc7ae5c4195802177986f3e876": { "describe": { "columns": [ { - "name": "min?", + "name": "id", "ordinal": 0, "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" } ], "nullable": [ - null + false, + false, + false ], "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int2" + ] } }, - "query": "\n SELECT MIN(miniblock_number) as \"min?\"\n FROM l2_to_l1_logs\n " + "query": "\n UPDATE prover_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " }, "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { "describe": { @@ -9707,85 +10294,148 @@ }, "query": "UPDATE eth_txs_history\n SET updated_at = now(), confirmed_at = now()\n WHERE tx_hash = $1\n RETURNING id, eth_tx_id" }, - "e33ee15019241ee9307cc447b3f92b54a8348abc8bba5568a3d43b6153d73e9b": { + "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "ByteaArray" + ] + } + }, + "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" + }, + "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [] + } + }, + "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" + }, + "ea1477a0c1509f989c0e2aa308cb59bd34b7ec841d5c6c242257ee8bde27ba83": { "describe": { "columns": [ { - "name": "number", + "name": "l1_batch_number", "ordinal": 0, "type_info": "Int8" }, { - "name": "timestamp", + "name": "scheduler_witness", "ordinal": 1, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "hash", + "name": "final_node_aggregations", "ordinal": 2, "type_info": "Bytea" }, { - "name": "l1_tx_count", + "name": "status", "ordinal": 3, - "type_info": "Int4" + "type_info": "Text" }, { - "name": "l2_tx_count", + "name": "processing_started_at", "ordinal": 4, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "base_fee_per_gas", + "name": "time_taken", "ordinal": 5, - "type_info": "Numeric" + "type_info": "Time" }, { - "name": "l1_gas_price", + "name": "error", "ordinal": 6, - "type_info": "Int8" + "type_info": "Text" }, { - "name": "l2_fair_gas_price", + "name": 
"created_at", "ordinal": 7, - "type_info": "Int8" + "type_info": "Timestamp" }, { - "name": "bootloader_code_hash", + "name": "updated_at", "ordinal": 8, - "type_info": "Bytea" + "type_info": "Timestamp" }, { - "name": "default_aa_code_hash", + "name": "attempts", "ordinal": 9, + "type_info": "Int4" + }, + { + "name": "aggregation_result_coords", + "ordinal": 10, "type_info": "Bytea" + }, + { + "name": "scheduler_witness_blob_url", + "ordinal": 11, + "type_info": "Text" + }, + { + "name": "final_node_aggregations_blob_url", + "ordinal": 12, + "type_info": "Text" + }, + { + "name": "is_blob_cleaned", + "ordinal": 13, + "type_info": "Bool" } ], "nullable": [ false, false, + true, false, - false, - false, + true, + true, + true, false, false, false, true, - true + true, + true, + false ], "parameters": { - "Left": [] + "Left": [ + "Interval", + "Int4", + "Int8" + ] + } + }, + "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " + }, + "eb95c3daeffd23d35d4e047e3bb8dc44e93492a6d41cf0fd1624d3ea4a2267c9": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] } }, - "query": "\n SELECT number, timestamp, hash, l1_tx_count, l2_tx_count,\n base_fee_per_gas, l1_gas_price, l2_fair_gas_price,\n bootloader_code_hash, default_aa_code_hash\n FROM miniblocks\n ORDER BY number DESC \n LIMIT 1\n " + "query": "UPDATE l1_batches SET predicted_commit_gas_cost = $2, updated_at = now() WHERE number = $1" }, - "e42721cc22fbb2bda84f64057586f019cc5122c8e8723f2a9df778b2aa19fffc": { + "eda61fd8012aadc27a2952e96d4238bccb21ec47a17e326a7ae9182d5358d733": { "describe": { "columns": [ { - "name": "version", + "name": "timestamp", "ordinal": 0, - "type_info": "Text" + "type_info": "Int8" } ], "nullable": [ @@ -9795,136 +10445,279 @@ "Left": [] } }, - "query": "SELECT version FROM contract_verification_solc_versions ORDER by version" + "query": "SELECT timestamp FROM l1_batches WHERE eth_prove_tx_id IS NULL AND number > 0 ORDER BY number LIMIT 1" }, - "e7f7e746aca1c17a8c88aba2db3f7cbd7c639c003580fc72e7b6af4c8ffba595": { + "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { "describe": { - "columns": [ - { - "name": "bytecode_hash", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], + "columns": [], + "nullable": [], "parameters": { "Left": [ - "Int8", - "Int8" + "Int8Array" ] } }, - "query": "SELECT bytecode_hash, bytecode FROM factory_deps\n WHERE miniblock_number >= $1 AND miniblock_number <= $2" + "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " }, - "e900682a160af90d532da47a1222fc1d7c9962ee8996dbd9b9bb63f13820cf2b": { + "ee5727dc06a7385969e834556b96bbfdf12a5049a1a1c270f203ef3fa0e8cb94": { "describe": { "columns": [], "nullable": [], "parameters": { "Left": [ - "ByteaArray" + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bool", + "Bytea", + "Int8", + "Bytea", + "Bytea", + 
"Bytea", + "Int8" ] } }, - "query": "DELETE FROM transactions WHERE in_mempool = TRUE AND initiator_address = ANY($1)" + "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7, l2_l1_merkle_root = $8, zkporter_is_available = $9, bootloader_code_hash = $10, rollup_last_leaf_index = $11, aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14, updated_at = now() WHERE number = $15" }, - "e90688187953eb3c8f5ff4b25c4a6b838e6717c720643b441dece5079b441fc2": { + "ee7bd820bf35c5c714092494c386eccff25457cff6dc00eb81d9809eaeb95670": { "describe": { - "columns": [], - "nullable": [], + "columns": [ + { + "name": "is_replaced!", + "ordinal": 0, + "type_info": "Bool" + } + ], + "nullable": [ + null + ], "parameters": { - "Left": [] + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Numeric", + "Bytea", + "Jsonb", + "Int4", + "Bytea", + "Numeric", + "Bytea", + "Bytea", + "Int8", + "Int4", + "Int4", + "Timestamp" + ] } }, - "query": "DELETE FROM eth_txs WHERE id >=\n (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" + "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, FALSE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n $19, now(), now()\n )\n ON CONFLICT\n (initiator_address, nonce)\n DO UPDATE\n SET hash=$1,\n signature=$4,\n gas_limit=$5,\n max_fee_per_gas=$6,\n max_priority_fee_per_gas=$7,\n gas_per_pubdata_limit=$8,\n input=$9,\n data=$10,\n tx_format=$11,\n contract_address=$12,\n value=$13,\n paymaster=$14,\n paymaster_input=$15,\n execution_info=jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n in_mempool=FALSE,\n received_at=$19,\n created_at=now(),\n updated_at=now(),\n error = NULL\n WHERE transactions.is_priority = FALSE AND transactions.miniblock_number IS NULL\n RETURNING (SELECT hash FROM transactions WHERE transactions.initiator_address = $2 AND transactions.nonce = $3) IS NOT NULL as \"is_replaced!\"\n " }, - "ea1477a0c1509f989c0e2aa308cb59bd34b7ec841d5c6c242257ee8bde27ba83": { + "ee87b42383cd6b4f1445e2aa152369fee31a7fea436db8b3b9925a60ac60cd1a": { "describe": { "columns": [ { - "name": "l1_batch_number", + "name": "hash", "ordinal": 0, - "type_info": "Int8" + "type_info": "Bytea" }, { - "name": "scheduler_witness", + "name": "is_priority", "ordinal": 1, - "type_info": "Bytea" + "type_info": "Bool" }, { - "name": "final_node_aggregations", + "name": "full_fee", "ordinal": 2, - "type_info": "Bytea" + "type_info": "Numeric" }, { - "name": "status", + "name": "layer_2_tip_fee", "ordinal": 3, - "type_info": "Text" + "type_info": "Numeric" }, { - "name": "processing_started_at", + "name": "initiator_address", "ordinal": 4, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "time_taken", + "name": "nonce", "ordinal": 5, - "type_info": "Time" + "type_info": "Int8" }, { - "name": "error", + "name": "signature", "ordinal": 6, - "type_info": "Text" + "type_info": "Bytea" }, { - 
"name": "created_at", + "name": "input", "ordinal": 7, - "type_info": "Timestamp" + "type_info": "Bytea" }, { - "name": "updated_at", + "name": "data", "ordinal": 8, - "type_info": "Timestamp" + "type_info": "Jsonb" }, { - "name": "attempts", + "name": "received_at", "ordinal": 9, - "type_info": "Int4" + "type_info": "Timestamp" }, { - "name": "aggregation_result_coords", + "name": "priority_op_id", "ordinal": 10, - "type_info": "Bytea" + "type_info": "Int8" }, { - "name": "scheduler_witness_blob_url", + "name": "l1_batch_number", "ordinal": 11, - "type_info": "Text" + "type_info": "Int8" }, { - "name": "final_node_aggregations_blob_url", + "name": "index_in_block", "ordinal": 12, - "type_info": "Text" + "type_info": "Int4" }, { - "name": "is_blob_cleaned", + "name": "error", "ordinal": 13, + "type_info": "Varchar" + }, + { + "name": "gas_limit", + "ordinal": 14, + "type_info": "Numeric" + }, + { + "name": "gas_per_storage_limit", + "ordinal": 15, + "type_info": "Numeric" + }, + { + "name": "gas_per_pubdata_limit", + "ordinal": 16, + "type_info": "Numeric" + }, + { + "name": "tx_format", + "ordinal": 17, + "type_info": "Int4" + }, + { + "name": "created_at", + "ordinal": 18, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 19, + "type_info": "Timestamp" + }, + { + "name": "execution_info", + "ordinal": 20, + "type_info": "Jsonb" + }, + { + "name": "contract_address", + "ordinal": 21, + "type_info": "Bytea" + }, + { + "name": "in_mempool", + "ordinal": 22, "type_info": "Bool" + }, + { + "name": "l1_block_number", + "ordinal": 23, + "type_info": "Int4" + }, + { + "name": "value", + "ordinal": 24, + "type_info": "Numeric" + }, + { + "name": "paymaster", + "ordinal": 25, + "type_info": "Bytea" + }, + { + "name": "paymaster_input", + "ordinal": 26, + "type_info": "Bytea" + }, + { + "name": "max_fee_per_gas", + "ordinal": 27, + "type_info": "Numeric" + }, + { + "name": "max_priority_fee_per_gas", + "ordinal": 28, + "type_info": "Numeric" + }, + { + "name": "effective_gas_price", + "ordinal": 29, + "type_info": "Numeric" + }, + { + "name": "miniblock_number", + "ordinal": 30, + "type_info": "Int8" + }, + { + "name": "l1_batch_tx_index", + "ordinal": 31, + "type_info": "Int4" + }, + { + "name": "refunded_gas", + "ordinal": 32, + "type_info": "Int8" + }, + { + "name": "l1_tx_mint", + "ordinal": 33, + "type_info": "Numeric" + }, + { + "name": "l1_tx_refund_recipient", + "ordinal": 34, + "type_info": "Bytea" } ], "nullable": [ false, false, true, + true, + false, + true, + true, + true, false, + false, + true, + true, + true, + true, + true, true, true, true, @@ -9932,69 +10725,27 @@ false, false, true, + false, + true, + false, + false, + false, true, true, - false + true, + true, + true, + false, + true, + true ], "parameters": { "Left": [ - "Interval", - "Int4", "Int8" ] } }, - "query": "\n UPDATE scheduler_witness_jobs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM scheduler_witness_jobs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING scheduler_witness_jobs.*\n " - }, - "edc9e374698c57ba9f65f83f0e1945e4785d8b4bc95f46ed4d16c095e5511709": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array" - ] - } 
- }, - "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET is_blob_cleaned=TRUE\n WHERE l1_batch_number = ANY($1);\n " - }, - "ee7bd820bf35c5c714092494c386eccff25457cff6dc00eb81d9809eaeb95670": { - "describe": { - "columns": [ - { - "name": "is_replaced!", - "ordinal": 0, - "type_info": "Bool" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Numeric", - "Bytea", - "Jsonb", - "Int4", - "Bytea", - "Numeric", - "Bytea", - "Bytea", - "Int8", - "Int4", - "Int4", - "Timestamp" - ] - } - }, - "query": "\n INSERT INTO transactions\n (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1, FALSE, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n $19, now(), now()\n )\n ON CONFLICT\n (initiator_address, nonce)\n DO UPDATE\n SET hash=$1,\n signature=$4,\n gas_limit=$5,\n max_fee_per_gas=$6,\n max_priority_fee_per_gas=$7,\n gas_per_pubdata_limit=$8,\n input=$9,\n data=$10,\n tx_format=$11,\n contract_address=$12,\n value=$13,\n paymaster=$14,\n paymaster_input=$15,\n execution_info=jsonb_build_object('gas_used', $16::bigint, 'storage_writes', $17::int, 'contracts_used', $18::int),\n in_mempool=FALSE,\n received_at=$19,\n created_at=now(),\n updated_at=now(),\n error = NULL\n WHERE transactions.is_priority = FALSE AND transactions.miniblock_number IS NULL\n RETURNING (SELECT hash FROM transactions WHERE transactions.initiator_address = $2 AND transactions.nonce = $3) IS NOT NULL as \"is_replaced!\"\n " + "query": "SELECT * FROM transactions WHERE miniblock_number = $1 ORDER BY index_in_block" }, "efc83e42f5d0238b8996a5b311746527289a5a002ff659531a076680127e8eb4": { "describe": { @@ -10016,53 +10767,6 @@ }, "query": "SELECT hash FROM l1_batches WHERE number = $1" }, - "f0308ffa4cc34a305150959ad1a30792c0b2bf493c6fa6183725b731a89c11e8": { - "describe": { - "columns": [ - { - "name": "count", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - } - }, - "query": "SELECT count(*)\n FROM storage\n WHERE\n address = $1 AND\n value != $2\n " - }, - "f0c50c53c3883c1ae59263b40e55011760d64350eff411eef856ff301bb70579": { - "describe": { - "columns": [ - { - "name": "l1_batch_number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_tx_index", - "ordinal": 1, - "type_info": "Int4" - } - ], - "nullable": [ - true, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT l1_batch_number, l1_batch_tx_index\n FROM transactions\n WHERE hash = $1\n " - }, "f0c83c517fdf9696a0acf288f061bd00a993e0b2379b667738b6876e2f588043": { "describe": { "columns": [ @@ -10167,6 +10871,38 @@ }, "query": "\n SELECT storage.value as \"value!\",\n tokens.l1_address as \"l1_address!\", tokens.l2_address as \"l2_address!\",\n tokens.symbol as \"symbol!\", tokens.name as \"name!\", tokens.decimals as \"decimals!\", tokens.usd_price as \"usd_price?\"\n FROM storage\n INNER JOIN tokens ON\n storage.address = tokens.l2_address OR (storage.address = $2 AND tokens.l2_address = $3)\n WHERE storage.hashed_key = ANY($1)\n " }, + 
"f5e3c4b23fa0d0686b400b64c42cf78b2219f0cbcf1c9240b77e4132513e36ef": { + "describe": { + "columns": [ + { + "name": "address", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "key", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "value", + "ordinal": 2, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT address, key, value FROM storage_logs WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1) AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1) ORDER BY miniblock_number, operation_number" + }, "f76f7d03cce064c0240da83a4ba75a0ce3fb57a18723c278a3d05eaf085f8994": { "describe": { "columns": [ @@ -10188,22 +10924,29 @@ }, "query": "SELECT COUNT(*) as \"count!\" FROM transactions\n WHERE miniblock_number BETWEEN $1 AND $2" }, - "f93109d1cc02f5516b40a4a29082a46fd6fa66972bae710d08cfe6a1484b1616": { + "f78960549e6201527454d060d5b483db032f4df80b4269a624f0309ed9a6a38e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + } + }, + "query": "\n UPDATE witness_inputs_fri SET status ='failed', error= $1, updated_at = now()\n WHERE l1_batch_number = $2\n " + }, + "fa006dda8f56abb70afc5ba8b6da631747d17ebd03a37ddb72914c4ed2aeb2f5": { "describe": { "columns": [ { - "name": "assembly_code", + "name": "trace", "ordinal": 0, - "type_info": "Text" - }, - { - "name": "pc_line_mapping", - "ordinal": 1, "type_info": "Jsonb" } ], "nullable": [ - false, false ], "parameters": { @@ -10212,27 +10955,123 @@ ] } }, - "query": "SELECT assembly_code, pc_line_mapping FROM contract_sources WHERE address = $1" + "query": "SELECT trace FROM transaction_traces WHERE tx_hash = $1" }, - "fa006dda8f56abb70afc5ba8b6da631747d17ebd03a37ddb72914c4ed2aeb2f5": { + "fa2b4316aaef09e96d93b70f96b129ed123951732e01d63f30b4b292d441ea39": { "describe": { "columns": [ { - "name": "trace", + "name": "l1_batch_number", "ordinal": 0, - "type_info": "Jsonb" + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "circuit_1_final_prover_job_id", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "circuit_2_final_prover_job_id", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "circuit_3_final_prover_job_id", + "ordinal": 4, + "type_info": "Int8" + }, + { + "name": "circuit_4_final_prover_job_id", + "ordinal": 5, + "type_info": "Int8" + }, + { + "name": "circuit_5_final_prover_job_id", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "circuit_6_final_prover_job_id", + "ordinal": 7, + "type_info": "Int8" + }, + { + "name": "circuit_7_final_prover_job_id", + "ordinal": 8, + "type_info": "Int8" + }, + { + "name": "circuit_8_final_prover_job_id", + "ordinal": 9, + "type_info": "Int8" + }, + { + "name": "circuit_9_final_prover_job_id", + "ordinal": 10, + "type_info": "Int8" + }, + { + "name": "circuit_10_final_prover_job_id", + "ordinal": 11, + "type_info": "Int8" + }, + { + "name": "circuit_11_final_prover_job_id", + "ordinal": 12, + "type_info": "Int8" + }, + { + "name": "circuit_12_final_prover_job_id", + "ordinal": 13, + "type_info": "Int8" + }, + { + "name": "circuit_13_final_prover_job_id", + "ordinal": 14, + "type_info": "Int8" + }, + { + "name": "created_at", + "ordinal": 15, + "type_info": "Timestamp" + }, + { + "name": "updated_at", + "ordinal": 16, + "type_info": "Timestamp" } ], "nullable": [ + false, + false, + true, + true, + true, + true, + 
true, + true, + true, + true, + true, + true, + true, + true, + true, + false, false ], "parameters": { "Left": [ - "Bytea" + "Int8" ] } }, - "query": "SELECT trace FROM transaction_traces WHERE tx_hash = $1" + "query": "\n SELECT * FROM scheduler_dependency_tracker_fri\n WHERE l1_batch_number = $1\n " }, "fa33d51f8627376832b11bb174354e65e645ee2fb81564a97725518f47ae6f57": { "describe": { diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 760bfcd62cce..6225460f01ac 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -1,20 +1,17 @@ -use std::collections::HashMap; -use std::convert::{Into, TryInto}; -use std::time::Instant; +use std::{ + collections::HashMap, + convert::{Into, TryInto}, + time::Instant, +}; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; use sqlx::Row; -use zksync_types::aggregated_operations::AggregatedActionType; -use zksync_types::commitment::{BlockWithMetadata, CommitmentSerializable}; - -use zksync_types::MAX_GAS_PER_PUBDATA_BYTE; - -use zksync_types::helpers::unix_timestamp_ms; use zksync_types::{ + aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, - commitment::BlockMetadata, - L1BatchNumber, MiniblockNumber, H256, + commitment::{BlockMetadata, BlockWithMetadata}, + L1BatchNumber, MiniblockNumber, H256, MAX_GAS_PER_PUBDATA_BYTE, }; use crate::{ @@ -24,183 +21,173 @@ use crate::{ #[derive(Debug)] pub struct BlocksDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl BlocksDal<'_, '_> { - pub fn is_genesis_needed(&mut self) -> bool { - async_std::task::block_on(async { - let count: i64 = sqlx::query!(r#"SELECT COUNT(*) as "count!" FROM l1_batches"#) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .count; - count == 0 - }) + pub async fn is_genesis_needed(&mut self) -> bool { + let count = sqlx::query!("SELECT COUNT(*) as \"count!\" FROM l1_batches") + .fetch_one(self.storage.conn()) + .await + .unwrap() + .count; + count == 0 } - pub fn get_sealed_block_number(&mut self) -> L1BatchNumber { - async_std::task::block_on(async { - let started_at = Instant::now(); - let number: i64 = sqlx::query!( - r#"SELECT MAX(number) as "number" FROM l1_batches WHERE is_finished = TRUE"# - ) + pub async fn get_sealed_block_number(&mut self) -> L1BatchNumber { + let started_at = Instant::now(); + let number = sqlx::query!( + "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" + ) + .fetch_one(self.storage.conn()) + .await + .unwrap() + .number + .expect("DAL invocation before genesis"); + + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); + L1BatchNumber(number as u32) + } + + pub async fn get_sealed_miniblock_number(&mut self) -> MiniblockNumber { + let started_at = Instant::now(); + let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM miniblocks") .fetch_one(self.storage.conn()) .await .unwrap() .number - .expect("DAL invocation before genesis"); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); - L1BatchNumber(number as u32) - }) + .unwrap_or(0); + + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_miniblock_number"); + MiniblockNumber(number as u32) } - pub fn get_sealed_miniblock_number(&mut self) -> MiniblockNumber { - async_std::task::block_on(async { - let started_at = Instant::now(); - let number: i64 = 
sqlx::query!(r#"SELECT MAX(number) as "number" FROM miniblocks"#) + pub async fn get_last_block_number_with_metadata(&mut self) -> L1BatchNumber { + let started_at = Instant::now(); + let number: i64 = + sqlx::query!("SELECT MAX(number) as \"number\" FROM l1_batches WHERE hash IS NOT NULL") .fetch_one(self.storage.conn()) .await .unwrap() .number - .unwrap_or(0); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_miniblock_number"); - MiniblockNumber(number as u32) - }) - } + .expect("DAL invocation before genesis"); - pub fn get_last_block_number_with_metadata(&mut self) -> L1BatchNumber { - async_std::task::block_on(async { - let started_at = Instant::now(); - let number: i64 = sqlx::query!( - r#"SELECT MAX(number) as "number" FROM l1_batches WHERE hash IS NOT NULL"# - ) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .number - .expect("DAL invocation before genesis"); - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_last_block_number_with_metadata"); - L1BatchNumber(number as u32) - }) + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_last_block_number_with_metadata"); + L1BatchNumber(number as u32) } - pub fn get_blocks_for_eth_tx_id(&mut self, eth_tx_id: u32) -> Vec { - async_std::task::block_on(async { - let blocks = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches - WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1", - eth_tx_id as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - blocks.into_iter().map(|bl| bl.into()).collect() - }) + pub async fn get_blocks_for_eth_tx_id(&mut self, eth_tx_id: u32) -> Vec { + let blocks = sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches \ + WHERE eth_commit_tx_id = $1 OR eth_prove_tx_id = $1 OR eth_execute_tx_id = $1", + eth_tx_id as i32 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + + blocks.into_iter().map(Into::into).collect() } - pub fn get_storage_block(&mut self, number: L1BatchNumber) -> Option { - async_std::task::block_on(async { - sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches WHERE number = $1", - number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - }) + pub async fn get_storage_block(&mut self, number: L1BatchNumber) -> Option { + sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() } - pub fn get_block_header(&mut self, number: L1BatchNumber) -> Option { - self.get_storage_block(number).map(Into::into) + pub async fn get_block_header(&mut self, number: L1BatchNumber) -> Option { + self.get_storage_block(number).await.map(Into::into) } - pub fn set_eth_tx_id( + pub async fn set_eth_tx_id( &mut self, first_block: L1BatchNumber, last_block: L1BatchNumber, eth_tx_id: u32, aggregation_type: AggregatedActionType, ) { - async_std::task::block_on(async { - match aggregation_type { - AggregatedActionType::CommitBlocks => { - sqlx::query!( - "UPDATE l1_batches \ - SET eth_commit_tx_id = $1, updated_at = now() \ - WHERE number BETWEEN $2 AND $3", - eth_tx_id as i32, - *first_block as i64, - *last_block as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - AggregatedActionType::PublishProofBlocksOnchain => { - sqlx::query!( - "UPDATE l1_batches \ - SET eth_prove_tx_id = $1, updated_at = now() \ - WHERE number BETWEEN $2 AND $3", - eth_tx_id as i32, - *first_block as i64, - *last_block as i64 - ) - 
.execute(self.storage.conn()) - .await - .unwrap(); - } - AggregatedActionType::ExecuteBlocks => { - sqlx::query!( - "UPDATE l1_batches \ - SET eth_execute_tx_id = $1, updated_at = now() \ - WHERE number BETWEEN $2 AND $3", - eth_tx_id as i32, - *first_block as i64, - *last_block as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } + match aggregation_type { + AggregatedActionType::CommitBlocks => { + sqlx::query!( + "UPDATE l1_batches \ + SET eth_commit_tx_id = $1, updated_at = now() \ + WHERE number BETWEEN $2 AND $3", + eth_tx_id as i32, + first_block.0 as i64, + last_block.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + AggregatedActionType::PublishProofBlocksOnchain => { + sqlx::query!( + "UPDATE l1_batches \ + SET eth_prove_tx_id = $1, updated_at = now() \ + WHERE number BETWEEN $2 AND $3", + eth_tx_id as i32, + first_block.0 as i64, + last_block.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - }) + AggregatedActionType::ExecuteBlocks => { + sqlx::query!( + "UPDATE l1_batches \ + SET eth_execute_tx_id = $1, updated_at = now() \ + WHERE number BETWEEN $2 AND $3", + eth_tx_id as i32, + first_block.0 as i64, + last_block.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + } } - pub fn insert_l1_batch(&mut self, block: L1BatchHeader, predicted_block_gas: BlockGasCount) { - async_std::task::block_on(async { - let priority_onchain_data: Vec> = block - .priority_ops_onchain_data - .iter() - .map(|data| data.clone().into()) - .collect(); - let l2_to_l1_logs: Vec> = block - .l2_to_l1_logs - .iter() - .map(|log| log.clone().to_bytes()) - .collect(); - - let initial_bootloader_contents = - serde_json::to_value(block.initial_bootloader_contents) - .expect("failed to serialize initial_bootloader_contents to JSON value"); - - let used_contract_hashes = serde_json::to_value(block.used_contract_hashes) - .expect("failed to serialize used_contract_hashes to JSON value"); - - let base_fee_per_gas = BigDecimal::from_u64(block.base_fee_per_gas) - .expect("block.base_fee_per_gas should fit in u64"); - - sqlx::query!( - "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, - timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, - predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, - initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, - bootloader_code_hash, default_aa_code_hash, - created_at, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now()) - ", + pub async fn insert_l1_batch( + &mut self, + block: &L1BatchHeader, + predicted_block_gas: BlockGasCount, + ) { + let priority_onchain_data: Vec> = block + .priority_ops_onchain_data + .iter() + .map(|data| data.clone().into()) + .collect(); + let l2_to_l1_logs: Vec<_> = block + .l2_to_l1_logs + .iter() + .map(|log| log.to_bytes().to_vec()) + .collect(); + + let initial_bootloader_contents = serde_json::to_value(&block.initial_bootloader_contents) + .expect("failed to serialize initial_bootloader_contents to JSON value"); + let used_contract_hashes = serde_json::to_value(&block.used_contract_hashes) + .expect("failed to serialize used_contract_hashes to JSON value"); + let base_fee_per_gas = BigDecimal::from_u64(block.base_fee_per_gas) + .expect("block.base_fee_per_gas should fit in u64"); + + sqlx::query!( + "INSERT INTO l1_batches (\ + number, l1_tx_count, 
l2_tx_count, \ + timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, \ + bloom, priority_ops_onchain_data, \ + predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, \ + initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, \ + l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, \ + created_at, updated_at\ + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, now(), now())", block.number.0 as i64, block.l1_tx_count as i32, block.l2_tx_count as i32, @@ -227,836 +214,852 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes() - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - pub fn insert_miniblock(&mut self, miniblock_header: MiniblockHeader) { + pub async fn insert_miniblock(&mut self, miniblock_header: &MiniblockHeader) { let base_fee_per_gas = BigDecimal::from_u64(miniblock_header.base_fee_per_gas) .expect("base_fee_per_gas should fit in u64"); - async_std::task::block_on(async { - sqlx::query!( - " - INSERT INTO miniblocks ( - number, timestamp, hash, l1_tx_count, l2_tx_count, - base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, - bootloader_code_hash, default_aa_code_hash, - created_at, updated_at - ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now()) - ", - miniblock_header.number.0 as i64, - miniblock_header.timestamp as i64, - miniblock_header.hash.as_bytes(), - miniblock_header.l1_tx_count as i32, - miniblock_header.l2_tx_count as i32, - base_fee_per_gas, - miniblock_header.l1_gas_price as i64, - miniblock_header.l2_fair_gas_price as i64, - MAX_GAS_PER_PUBDATA_BYTE as i64, - miniblock_header - .base_system_contracts_hashes - .bootloader - .as_bytes(), - miniblock_header - .base_system_contracts_hashes - .default_aa - .as_bytes(), - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + + sqlx::query!( + "INSERT INTO miniblocks (\ + number, timestamp, hash, l1_tx_count, l2_tx_count, \ + base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, \ + bootloader_code_hash, default_aa_code_hash, \ + created_at, updated_at\ + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, now(), now())", + miniblock_header.number.0 as i64, + miniblock_header.timestamp as i64, + miniblock_header.hash.as_bytes(), + miniblock_header.l1_tx_count as i32, + miniblock_header.l2_tx_count as i32, + base_fee_per_gas, + miniblock_header.l1_gas_price as i64, + miniblock_header.l2_fair_gas_price as i64, + MAX_GAS_PER_PUBDATA_BYTE as i64, + miniblock_header + .base_system_contracts_hashes + .bootloader + .as_bytes(), + miniblock_header + .base_system_contracts_hashes + .default_aa + .as_bytes(), + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - pub fn get_last_sealed_miniblock_header(&mut self) -> Option { - async_std::task::block_on(async { - sqlx::query_as!( - StorageMiniblockHeader, - " - SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, - base_fee_per_gas, l1_gas_price, l2_fair_gas_price, - bootloader_code_hash, default_aa_code_hash - FROM miniblocks - ORDER BY number DESC - LIMIT 1 - ", - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.into()) - }) + pub async fn get_last_sealed_miniblock_header(&mut self) -> Option { + sqlx::query_as!( + StorageMiniblockHeader, + "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \ + base_fee_per_gas, 
l1_gas_price, l2_fair_gas_price, \ + bootloader_code_hash, default_aa_code_hash \ + FROM miniblocks \ + ORDER BY number DESC \ + LIMIT 1", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(Into::into) } - pub fn get_miniblock_header( + pub async fn get_miniblock_header( &mut self, miniblock_number: MiniblockNumber, ) -> Option { - async_std::task::block_on(async { - sqlx::query_as!( - StorageMiniblockHeader, - " - SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, - base_fee_per_gas, l1_gas_price, l2_fair_gas_price, - bootloader_code_hash, default_aa_code_hash - FROM miniblocks - WHERE number = $1 - ", - miniblock_number.0 as i64, - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.into()) - }) + sqlx::query_as!( + StorageMiniblockHeader, + "SELECT number, timestamp, hash, l1_tx_count, l2_tx_count, \ + base_fee_per_gas, l1_gas_price, l2_fair_gas_price, \ + bootloader_code_hash, default_aa_code_hash \ + FROM miniblocks \ + WHERE number = $1", + miniblock_number.0 as i64, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(Into::into) } - pub fn mark_miniblocks_as_executed_in_l1_batch(&mut self, l1_batch_number: L1BatchNumber) { - async_std::task::block_on(async { - sqlx::query!( - " - UPDATE miniblocks - SET l1_batch_number = $1 - WHERE l1_batch_number IS NULL - ", - l1_batch_number.0 as i32, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + pub async fn mark_miniblocks_as_executed_in_l1_batch( + &mut self, + l1_batch_number: L1BatchNumber, + ) { + sqlx::query!( + "UPDATE miniblocks \ + SET l1_batch_number = $1 \ + WHERE l1_batch_number IS NULL", + l1_batch_number.0 as i32, + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - pub fn save_block_metadata( + pub async fn save_block_metadata( &mut self, block_number: L1BatchNumber, - block_metadata: BlockMetadata, + block_metadata: &BlockMetadata, ) { - async_std::task::block_on(async { - sqlx::query!( - " - UPDATE l1_batches - SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, - compressed_repeated_writes = $5, compressed_initial_writes = $6, l2_l1_compressed_messages = $7, - l2_l1_merkle_root = $8, - zkporter_is_available = $9, bootloader_code_hash = $10, rollup_last_leaf_index = $11, - aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14, - updated_at = now() - WHERE number = $15 - ", - block_metadata.root_hash.as_bytes(), - block_metadata.merkle_root_hash.as_bytes(), - block_metadata.commitment.as_bytes(), - block_metadata.block_meta_params.default_aa_code_hash.as_bytes(), - block_metadata.repeated_writes_compressed, - block_metadata.initial_writes_compressed, - block_metadata.l2_l1_messages_compressed, - block_metadata.l2_l1_merkle_root.as_bytes(), - block_metadata.block_meta_params.zkporter_is_available, - block_metadata.block_meta_params.bootloader_code_hash.as_bytes(), - block_metadata.rollup_last_leaf_index as i64, - block_metadata.aux_data_hash.as_bytes(), - block_metadata.pass_through_data_hash.as_bytes(), - block_metadata.meta_parameters_hash.as_bytes(), - block_number.0 as i64, - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + sqlx::query!( + "UPDATE l1_batches \ + SET hash = $1, merkle_root_hash = $2, commitment = $3, default_aa_code_hash = $4, \ + compressed_repeated_writes = $5, compressed_initial_writes = $6, \ + l2_l1_compressed_messages = $7, l2_l1_merkle_root = $8, \ + zkporter_is_available = $9, bootloader_code_hash = $10, rollup_last_leaf_index = $11, \ + 
aux_data_hash = $12, pass_through_data_hash = $13, meta_parameters_hash = $14, \ + updated_at = now() \ + WHERE number = $15", + block_metadata.root_hash.as_bytes(), + block_metadata.merkle_root_hash.as_bytes(), + block_metadata.commitment.as_bytes(), + block_metadata.block_meta_params.default_aa_code_hash.as_bytes(), + block_metadata.repeated_writes_compressed, + block_metadata.initial_writes_compressed, + block_metadata.l2_l1_messages_compressed, + block_metadata.l2_l1_merkle_root.as_bytes(), + block_metadata.block_meta_params.zkporter_is_available, + block_metadata.block_meta_params.bootloader_code_hash.as_bytes(), + block_metadata.rollup_last_leaf_index as i64, + block_metadata.aux_data_hash.as_bytes(), + block_metadata.pass_through_data_hash.as_bytes(), + block_metadata.meta_parameters_hash.as_bytes(), + block_number.0 as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - pub fn save_blocks_metadata( + pub async fn save_blocks_metadata( &mut self, block_number: L1BatchNumber, - block_metadata: BlockMetadata, + block_metadata: &BlockMetadata, previous_root_hash: H256, ) { - async_std::task::block_on(async { - let started_at = Instant::now(); - let update_result = sqlx::query!( - " - UPDATE l1_batches SET - hash = $1, merkle_root_hash = $2, commitment = $3, - compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, - l2_l1_merkle_root = $7, zkporter_is_available = $8, - parent_hash = $9, rollup_last_leaf_index = $10, - aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, - updated_at = NOW() - WHERE number = $14 AND hash IS NULL - ", + let started_at = Instant::now(); + let update_result = sqlx::query!( + "UPDATE l1_batches \ + SET hash = $1, merkle_root_hash = $2, commitment = $3, \ + compressed_repeated_writes = $4, compressed_initial_writes = $5, \ + l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, \ + zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, \ + aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, \ + updated_at = now() \ + WHERE number = $14 AND hash IS NULL", + block_metadata.root_hash.as_bytes(), + block_metadata.merkle_root_hash.as_bytes(), + block_metadata.commitment.as_bytes(), + block_metadata.repeated_writes_compressed, + block_metadata.initial_writes_compressed, + block_metadata.l2_l1_messages_compressed, + block_metadata.l2_l1_merkle_root.as_bytes(), + block_metadata.block_meta_params.zkporter_is_available, + previous_root_hash.0.to_vec(), + block_metadata.rollup_last_leaf_index as i64, + block_metadata.aux_data_hash.as_bytes(), + block_metadata.pass_through_data_hash.as_bytes(), + block_metadata.meta_parameters_hash.as_bytes(), + block_number.0 as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + + if update_result.rows_affected() == 0 { + vlog::debug!( + "L1 batch {} info wasn't updated. Details: root_hash: {:?}, merkle_root_hash: {:?}, \ + parent_hash: {:?}, commitment: {:?}, l2_l1_merkle_root: {:?}", + block_number.0 as i64, + block_metadata.root_hash, + block_metadata.merkle_root_hash, + previous_root_hash, + block_metadata.commitment, + block_metadata.l2_l1_merkle_root + ); + + // block was already processed. 
Verify that existing hashes match + let matched: i64 = sqlx::query!( + "SELECT COUNT(*) as \"count!\" \ + FROM l1_batches \ + WHERE number = $1 AND hash = $2 AND merkle_root_hash = $3 \ + AND parent_hash = $4 AND l2_l1_merkle_root = $5", + block_number.0 as i64, block_metadata.root_hash.as_bytes(), block_metadata.merkle_root_hash.as_bytes(), - block_metadata.commitment.as_bytes(), - block_metadata.repeated_writes_compressed, - block_metadata.initial_writes_compressed, - block_metadata.l2_l1_messages_compressed, + previous_root_hash.as_bytes(), block_metadata.l2_l1_merkle_root.as_bytes(), - block_metadata.block_meta_params.zkporter_is_available, - previous_root_hash.0.to_vec(), - block_metadata.rollup_last_leaf_index as i64, - block_metadata.aux_data_hash.as_bytes(), - block_metadata.pass_through_data_hash.as_bytes(), - block_metadata.meta_parameters_hash.as_bytes(), - block_number.0 as i64, ) - .execute(self.storage.conn()) - .await - .unwrap(); - - if update_result.rows_affected() == 0 { - vlog::debug!( - "L1 batch {} info wasn't updated. Details: root_hash: {:?}, merkle_root_hash: {:?}, parent_hash: {:?}, commitment: {:?}, l2_l1_merkle_root: {:?}", - block_number.0 as i64, - block_metadata.root_hash.0.to_vec(), - block_metadata.merkle_root_hash.0.to_vec(), - previous_root_hash.0.to_vec(), - block_metadata.commitment.0.to_vec(), - block_metadata.l2_l1_merkle_root.as_bytes() - ); - - // block was already processed. Verify that existing hashes match - let matched: i64 = sqlx::query!( - r#" - SELECT COUNT(*) as "count!" - FROM l1_batches - WHERE number = $1 - AND hash = $2 - AND merkle_root_hash = $3 - AND parent_hash = $4 - AND l2_l1_merkle_root = $5 - "#, - block_number.0 as i64, - block_metadata.root_hash.0.to_vec(), - block_metadata.merkle_root_hash.0.to_vec(), - previous_root_hash.0.to_vec(), - block_metadata.l2_l1_merkle_root.as_bytes(), - ) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .count; + .fetch_one(self.storage.conn()) + .await + .unwrap() + .count; - assert_eq!(matched, 1, "Root hash verification failed. Hashes for some of previously processed blocks do not match"); - } - metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_blocks_metadata"); - }) + assert!( + matched == 1, + "Root hash verification failed. Hashes for L1 batch #{} do not match the expected values \ + (expected state hash: {:?}, L2 to L1 logs hash: {:?})", + block_number, + block_metadata.root_hash, + block_metadata.l2_l1_merkle_root + ); + } + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_blocks_metadata"); } - pub fn get_last_committed_to_eth_block(&mut self) -> Option { - async_std::task::block_on(async { - // We can get 0 block for the first transaction - let block = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ - WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL \ - ORDER BY number DESC LIMIT 1", - ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - // genesis block is first generated without commitment, we should wait for the tree to set it. 
-            block.commitment.as_ref()?;
+    pub async fn get_last_committed_to_eth_block(&mut self) -> Option<BlockWithMetadata> {
+        // Block 0 (the genesis block) can be returned for the first transaction.
+        let block = sqlx::query_as!(
+            StorageBlock,
+            "SELECT * FROM l1_batches \
+            WHERE number = 0 OR eth_commit_tx_id IS NOT NULL AND commitment IS NOT NULL \
+            ORDER BY number DESC LIMIT 1",
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap();
+        // The genesis block is initially created without a commitment; we should wait for the tree to set it.
+        block.commitment.as_ref()?;

-            self.get_block_with_metadata(block)
-        })
+        self.get_block_with_metadata(block).await
     }

     /// Returns the number of the last block for which an Ethereum commit tx was sent and confirmed.
-    pub fn get_number_of_last_block_committed_on_eth(&mut self) -> Option<L1BatchNumber> {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT number FROM l1_batches
-                LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id)
-                WHERE commit_tx.confirmed_at IS NOT NULL
-                ORDER BY number DESC LIMIT 1"
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap()
-            .map(|record| L1BatchNumber(record.number as u32))
-        })
+    pub async fn get_number_of_last_block_committed_on_eth(&mut self) -> Option<L1BatchNumber> {
+        sqlx::query!(
+            "SELECT number FROM l1_batches \
+            LEFT JOIN eth_txs_history AS commit_tx \
+            ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) \
+            WHERE commit_tx.confirmed_at IS NOT NULL \
+            ORDER BY number DESC LIMIT 1"
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| L1BatchNumber(row.number as u32))
+    }
+
+    /// Returns the number of the last block for which an Ethereum prove tx exists in the database.
+    pub async fn get_last_l1_batch_with_prove_tx(&mut self) -> L1BatchNumber {
+        let row = sqlx::query!(
+            "SELECT COALESCE(MAX(number), 0) AS \"number!\" \
+            FROM l1_batches \
+            WHERE eth_prove_tx_id IS NOT NULL"
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap();
+
+        L1BatchNumber(row.number as u32)
     }

     /// Returns the number of the last block for which an Ethereum prove tx was sent and confirmed.
-    pub fn get_number_of_last_block_proven_on_eth(&mut self) -> Option<L1BatchNumber> {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT number FROM l1_batches
-                LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id)
-                WHERE prove_tx.confirmed_at IS NOT NULL
-                ORDER BY number DESC LIMIT 1"
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap()
-            .map(|record| L1BatchNumber(record.number as u32))
-        })
+    pub async fn get_number_of_last_block_proven_on_eth(&mut self) -> Option<L1BatchNumber> {
+        sqlx::query!(
+            "SELECT number FROM l1_batches \
+            LEFT JOIN eth_txs_history AS prove_tx \
+            ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) \
+            WHERE prove_tx.confirmed_at IS NOT NULL \
+            ORDER BY number DESC LIMIT 1"
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|record| L1BatchNumber(record.number as u32))
     }

     /// Returns the number of the last block for which an Ethereum execute tx was sent and confirmed.
- pub fn get_number_of_last_block_executed_on_eth(&mut self) -> Option { - async_std::task::block_on(async { - sqlx::query!( - "SELECT number FROM l1_batches - LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) - WHERE execute_tx.confirmed_at IS NOT NULL - ORDER BY number DESC LIMIT 1" - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|record| L1BatchNumber(record.number as u32)) - }) + pub async fn get_number_of_last_block_executed_on_eth(&mut self) -> Option { + sqlx::query!( + "SELECT number FROM l1_batches \ + LEFT JOIN eth_txs_history as execute_tx \ + ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) \ + WHERE execute_tx.confirmed_at IS NOT NULL \ + ORDER BY number DESC LIMIT 1" + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.number as u32)) } - /// This method returns blocks for which the proofs are computed - pub fn get_ready_for_proof_blocks_real_verifier( + /// This method returns blocks that are confirmed on L1. That is, it doesn't wait for the proofs to be generated. + pub async fn get_ready_for_dummy_proof_blocks( &mut self, limit: usize, ) -> Vec { - async_std::task::block_on(async { - let last_proved_block_number_row = sqlx::query!( - r#"SELECT COALESCE(max(number), 0) as "number!" FROM l1_batches - WHERE eth_prove_tx_id IS NOT NULL"# - ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - let last_proved_block_number = - L1BatchNumber(last_proved_block_number_row.number as u32); - // note that the proofs can be generated out of order, so - // `WHERE l1_batches.number - row_number = $1` is used to avoid having gaps in the list of blocks to proof - // note that we need to manually list all the columns in `l1_batches` table here - we cannot use `*` because there is one extra column (`row_number`) - let l1_batches = sqlx::query_as!( - StorageBlock, - " - SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit - FROM - (SELECT l1_batches.*, row_number() over (order by number ASC) as row_number - FROM l1_batches - LEFT JOIN prover_jobs ON prover_jobs.l1_batch_number = l1_batches.number - WHERE eth_commit_tx_id IS NOT NULL - AND prover_jobs.aggregation_round = 3 - AND prover_jobs.status = 'successful' - AND l1_batches.number > $1 - ORDER BY number LIMIT $2) inn - WHERE number - row_number = $1 - ", - last_proved_block_number.0 as i32, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|block| { - self.get_block_with_metadata(block) - .expect("Block should be complete") - }) - .collect() - }) + let raw_batches = sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches \ + WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL \ 
+ ORDER BY number LIMIT $1", + limit as i32 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + + self.map_l1_batches(raw_batches).await } - /// This method returns blocks that are confirmed on L1. That is, it doesn't wait for the proofs to be generated. - pub fn get_ready_for_dummy_proof_blocks(&mut self, limit: usize) -> Vec { - async_std::task::block_on(async { - let l1_batches = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ - WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL \ - ORDER BY number LIMIT $1", - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|block| { - self.get_block_with_metadata(block) - .expect("Block should be complete") - }) - .collect() - }) + async fn map_l1_batches(&mut self, raw_batches: Vec) -> Vec { + let mut l1_batches = Vec::with_capacity(raw_batches.len()); + for raw_batch in raw_batches { + let block = self + .get_block_with_metadata(raw_batch) + .await + .expect("Block should be complete"); + l1_batches.push(block); + } + l1_batches } - pub fn set_skip_proof_for_l1_batch(&mut self, l1_batch_number: L1BatchNumber) { - async_std::task::block_on(async { - sqlx::query!( - " - UPDATE l1_batches - SET skip_proof = TRUE WHERE number = $1 - ", - l1_batch_number.0 as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + pub async fn set_skip_proof_for_l1_batch(&mut self, l1_batch_number: L1BatchNumber) { + sqlx::query!( + "UPDATE l1_batches SET skip_proof = TRUE WHERE number = $1", + l1_batch_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); } /// This method returns blocks that are committed on L1 and witness jobs for them are skipped. - pub fn get_skipped_for_proof_blocks(&mut self, limit: usize) -> Vec { - async_std::task::block_on(async { - let last_proved_block_number_row = sqlx::query!( - r#"SELECT COALESCE(max(number), 0) as "number!" 
FROM l1_batches - WHERE eth_prove_tx_id IS NOT NULL"# - ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - let last_proved_block_number = - L1BatchNumber(last_proved_block_number_row.number as u32); - // note that the witness jobs can be processed out of order, so - // `WHERE l1_batches.number - row_number = $1` is used to avoid having gaps in the list of blocks to send dummy proofs for - // note that we need to manually list all the columns in `l1_batches` table here - we cannot use `*` because there is one extra column (`row_number`) - let l1_batches = sqlx::query_as!( - StorageBlock, - " - SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit - FROM - (SELECT l1_batches.*, row_number() over (order by number ASC) as row_number - FROM l1_batches - WHERE eth_commit_tx_id IS NOT NULL - AND l1_batches.skip_proof = TRUE - AND l1_batches.number > $1 - ORDER BY number LIMIT $2) inn - WHERE number - row_number = $1 - ", - last_proved_block_number.0 as i32, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(); - l1_batches - .into_iter() - .map(|block| { - self.get_block_with_metadata(block) - .expect("Block should be complete") - }) - .collect() - }) + pub async fn get_skipped_for_proof_blocks(&mut self, limit: usize) -> Vec { + let last_proved_block_number = self.get_last_l1_batch_with_prove_tx().await; + // Witness jobs can be processed out of order, so `WHERE l1_batches.number - row_number = $1` + // is used to avoid having gaps in the list of blocks to send dummy proofs for. + // We need to manually list all the columns in `l1_batches` table here - we cannot use `*` + // because there is one extra column (`row_number`). 
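+        // For example, if the last proved batch is #10 and batches 11, 12 and 14
+        // currently satisfy the filter, `row_number()` assigns them 1, 2 and 3,
+        // so `number - row_number` evaluates to 10, 10 and 11 respectively.
+        // Only the contiguous prefix 11, 12 passes the `number - row_number = $1`
+        // check; batch 14 is postponed until batch 13 becomes eligible.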
+ let raw_batches = sqlx::query_as!( + StorageBlock, + "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, \ + bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, \ + compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, created_at, \ + updated_at, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, predicted_commit_gas_cost, \ + predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, \ + used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, \ + l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, \ + rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, \ + default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, \ + meta_parameters_hash, skip_proof, gas_per_pubdata_byte_in_block, gas_per_pubdata_limit \ + FROM \ + (SELECT l1_batches.*, row_number() OVER (ORDER BY number ASC) AS row_number \ + FROM l1_batches \ + WHERE eth_commit_tx_id IS NOT NULL \ + AND l1_batches.skip_proof = TRUE \ + AND l1_batches.number > $1 \ + ORDER BY number LIMIT $2\ + ) inn \ + WHERE number - row_number = $1", + last_proved_block_number.0 as i32, + limit as i32 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + + self.map_l1_batches(raw_batches).await } - pub fn get_ready_for_execute_blocks( + pub async fn get_ready_for_execute_blocks( &mut self, limit: usize, - l1_batch_min_age_before_execute_seconds: Option, + max_l1_batch_timestamp_millis: Option, ) -> Vec { - async_std::task::block_on(async { - let l1_batches = match l1_batch_min_age_before_execute_seconds { - None => sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches \ - WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ - ORDER BY number LIMIT $1", - limit as i32, - ) - .fetch_all(self.storage.conn()) - .await - .unwrap(), - Some(l1_batch_min_age_before_execute_seconds) => { - let max_l1_batch_timestamp_seconds = - unix_timestamp_ms() / 1000 - l1_batch_min_age_before_execute_seconds; - - sqlx::query_as!( - StorageBlock, - "SELECT l1_batches.* FROM l1_batches \ - JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) \ - JOIN eth_txs_history as commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) \ - WHERE commit_tx.confirmed_at IS NOT NULL \ - AND eth_prove_tx_id IS NOT NULL \ - AND eth_execute_tx_id IS NULL \ - AND EXTRACT(epoch from commit_tx.confirmed_at) < $1 \ - ORDER BY number LIMIT $2", - max_l1_batch_timestamp_seconds as i32, - limit as i32, - ) - .fetch_all(self.storage.conn()) + let raw_batches = match max_l1_batch_timestamp_millis { + None => sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches \ + WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ + ORDER BY number LIMIT $1", + limit as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(), + + Some(max_l1_batch_timestamp_millis) => { + // Do not lose the precision here, otherwise we can skip some L1 batches. + // Mostly needed for tests. 
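+                // `EXTRACT(epoch FROM ...)` in Postgres yields fractional seconds, so the
+                // cut-off is converted with float division below: e.g. 1_500 ms becomes
+                // 1.5 s instead of being truncated to 1 s, which could wrongly exclude a
+                // batch whose commit tx was confirmed within that sub-second window.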
+ let max_l1_batch_timestamp_seconds = max_l1_batch_timestamp_millis as f64 / 1_000.0; + self.raw_ready_for_execute_blocks(max_l1_batch_timestamp_seconds, limit) .await - .unwrap() - } - }; - l1_batches - .into_iter() - .map(|block| { - self.get_block_with_metadata(block) - .expect("Block should be complete") - }) - .collect() - }) + } + }; + + self.map_l1_batches(raw_batches).await } - pub fn get_ready_for_commit_blocks( + async fn raw_ready_for_execute_blocks( &mut self, + max_l1_batch_timestamp_seconds: f64, limit: usize, - bootloader_hash: H256, - default_aa_hash: H256, - ) -> Vec { - async_std::task::block_on(async { - let l1_batches = sqlx::query_as!( + ) -> Vec { + // We need to find the first L1 batch that is supposed to be executed. + // Here we ignore the time delay, so we just take the first L1 batch that is ready for execution. + let row = sqlx::query!( + "SELECT number FROM l1_batches \ + WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL \ + ORDER BY number LIMIT 1" + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + + let Some(row) = row else { return vec![] }; + let expected_started_point = row.number; + + // Find the last L1 batch that is ready for execution. + let row = sqlx::query!( + "SELECT max(l1_batches.number) FROM l1_batches \ + JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) \ + JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) \ + WHERE commit_tx.confirmed_at IS NOT NULL \ + AND eth_prove_tx_id IS NOT NULL \ + AND eth_execute_tx_id IS NULL \ + AND EXTRACT(epoch FROM commit_tx.confirmed_at) < $1", + max_l1_batch_timestamp_seconds, + ) + .fetch_one(self.storage.conn()) + .await + .unwrap(); + + if let Some(max_ready_to_send_block) = row.max { + // If we found at least one ready to execute batch then we can simply return all blocks between + // the expected started point and the max ready to send block because we send them to the L1 sequentially. 
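+            // For example, if batches 5..=9 are proved but not yet executed and the
+            // commit txs for batches 5..=7 were confirmed before the cut-off, then
+            // `expected_started_point` is 5, `max_ready_to_send_block` is 7, and
+            // batches 5, 6 and 7 are returned (subject to `limit`).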
+ assert!(max_ready_to_send_block >= expected_started_point); + sqlx::query_as!( StorageBlock, - "SELECT * FROM l1_batches - WHERE eth_commit_tx_id IS NULL - AND number != 0 - AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 - AND commitment IS NOT NULL + "SELECT * FROM l1_batches \ + WHERE number BETWEEN $1 AND $2 \ ORDER BY number LIMIT $3", - bootloader_hash.as_bytes(), - default_aa_hash.as_bytes(), - limit as i64, + expected_started_point as i32, + max_ready_to_send_block, + limit as i32, ) .fetch_all(self.storage.conn()) .await - .unwrap(); - l1_batches - .into_iter() - .map(|block| { - self.get_block_with_metadata(block) - .expect("Block should be complete") - }) - .collect() - }) + .unwrap() + } else { + vec![] + } } - pub fn get_block_state_root(&mut self, number: L1BatchNumber) -> Option { - async_std::task::block_on(async { - let hash: Option<_> = sqlx::query!( - "SELECT hash FROM l1_batches WHERE number = $1", - number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .and_then(|row| row.hash) - .map(|hash| H256::from_slice(&hash)); - hash - }) + pub async fn get_ready_for_commit_blocks( + &mut self, + limit: usize, + bootloader_hash: H256, + default_aa_hash: H256, + ) -> Vec { + let raw_batches = sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches \ + WHERE eth_commit_tx_id IS NULL \ + AND number != 0 \ + AND bootloader_code_hash = $1 AND default_aa_code_hash = $2 \ + AND commitment IS NOT NULL \ + ORDER BY number LIMIT $3", + bootloader_hash.as_bytes(), + default_aa_hash.as_bytes(), + limit as i64, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap(); + + self.map_l1_batches(raw_batches).await } - pub fn get_merkle_state_root(&mut self, number: L1BatchNumber) -> Option { - async_std::task::block_on(async { - let hash: Option> = sqlx::query!( - "SELECT merkle_root_hash FROM l1_batches WHERE number = $1", - number.0 as i64 - ) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .merkle_root_hash; - hash.map(|hash| H256::from_slice(&hash)) - }) + pub async fn get_block_state_root(&mut self, number: L1BatchNumber) -> Option { + sqlx::query!( + "SELECT hash FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .and_then(|row| row.hash) + .map(|hash| H256::from_slice(&hash)) } - pub fn get_newest_block_header(&mut self) -> L1BatchHeader { - async_std::task::block_on(async { - let last_block = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches - ORDER BY number DESC - LIMIT 1" - ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - last_block.into() - }) + pub async fn get_block_state_root_and_timestamp( + &mut self, + number: L1BatchNumber, + ) -> Option<(H256, u64)> { + let row = sqlx::query!( + "SELECT timestamp, hash FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap()?; + + Some((H256::from_slice(&row.hash?), row.timestamp as u64)) } - pub fn get_block_metadata(&mut self, number: L1BatchNumber) -> Option { - async_std::task::block_on(async { - let l1_batch: Option = sqlx::query_as!( - StorageBlock, - "SELECT * FROM l1_batches WHERE number = $1", - number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap(); + pub async fn get_newest_block_header(&mut self) -> L1BatchHeader { + let last_block = sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches ORDER BY number DESC LIMIT 1" + ) + .fetch_one(self.storage.conn()) + .await + .unwrap(); - 
l1_batch.and_then(|bl| self.get_block_with_metadata(bl)) - }) + last_block.into() } - pub fn get_block_with_metadata( + pub async fn get_block_metadata(&mut self, number: L1BatchNumber) -> Option { + let l1_batch: Option = sqlx::query_as!( + StorageBlock, + "SELECT * FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + + if let Some(bl) = l1_batch { + self.get_block_with_metadata(bl).await + } else { + None + } + } + + pub async fn get_block_with_metadata( &mut self, storage_block: StorageBlock, ) -> Option { - async_std::task::block_on(async { - let unsorted_factory_deps = - self.get_l1_batch_factory_deps(L1BatchNumber(storage_block.number as u32)); - let block_header = storage_block.clone().try_into().ok()?; - let block_metadata = storage_block.try_into().ok()?; - - Some(BlockWithMetadata::new( - block_header, - block_metadata, - unsorted_factory_deps, - )) - }) + let unsorted_factory_deps = self + .get_l1_batch_factory_deps(L1BatchNumber(storage_block.number as u32)) + .await; + let block_header = storage_block.clone().into(); + let block_metadata = storage_block.try_into().ok()?; + + Some(BlockWithMetadata::new( + block_header, + block_metadata, + unsorted_factory_deps, + )) } - pub fn get_l1_batch_factory_deps( + pub async fn get_l1_batch_factory_deps( &mut self, l1_batch_number: L1BatchNumber, ) -> HashMap> { - async_std::task::block_on(async { - sqlx::query!( - "SELECT bytecode_hash, bytecode FROM factory_deps - INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number - WHERE miniblocks.l1_batch_number = $1", - l1_batch_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (H256::from_slice(&row.bytecode_hash), row.bytecode)) - .collect() - }) + sqlx::query!( + "SELECT bytecode_hash, bytecode FROM factory_deps \ + INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number \ + WHERE miniblocks.l1_batch_number = $1", + l1_batch_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (H256::from_slice(&row.bytecode_hash), row.bytecode)) + .collect() } - pub fn delete_l1_batches(&mut self, block_number: L1BatchNumber) { - async_std::task::block_on(async { - sqlx::query!( - "DELETE FROM l1_batches WHERE number > $1", - block_number.0 as i64 - ) + /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. + pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) { + self.delete_l1_batches_inner(Some(last_batch_to_keep)).await; + } + + async fn delete_l1_batches_inner(&mut self, last_batch_to_keep: Option) { + let block_number = last_batch_to_keep.map_or(-1, |number| number.0 as i64); + sqlx::query!("DELETE FROM l1_batches WHERE number > $1", block_number) .execute(self.storage.conn()) .await .unwrap(); - }) } - pub fn delete_miniblocks(&mut self, block_number: MiniblockNumber) { - async_std::task::block_on(async { - sqlx::query!( - "DELETE FROM miniblocks WHERE number > $1", - block_number.0 as i64 - ) + /// Deletes all miniblocks from the storage so that the specified miniblock number is the last one left. 
+    pub async fn delete_miniblocks(&mut self, last_miniblock_to_keep: MiniblockNumber) {
+        self.delete_miniblocks_inner(Some(last_miniblock_to_keep))
+            .await
+    }
+
+    async fn delete_miniblocks_inner(&mut self, last_miniblock_to_keep: Option<MiniblockNumber>) {
+        let block_number = last_miniblock_to_keep.map_or(-1, |number| number.0 as i64);
+        sqlx::query!("DELETE FROM miniblocks WHERE number > $1", block_number)
             .execute(self.storage.conn())
             .await
             .unwrap();
-        })
     }

     /// Returns the sum of predicted gas costs for the given block range.
     /// Panics if the sum doesn't fit into u32.
-    pub fn get_blocks_predicted_gas(
+    pub async fn get_blocks_predicted_gas(
         &mut self,
         from_block: L1BatchNumber,
         to_block: L1BatchNumber,
         op_type: AggregatedActionType,
     ) -> u32 {
-        async_std::task::block_on(async {
-            let column_name = match op_type {
-                AggregatedActionType::CommitBlocks => "predicted_commit_gas_cost",
-                AggregatedActionType::PublishProofBlocksOnchain => "predicted_prove_gas_cost",
-                AggregatedActionType::ExecuteBlocks => "predicted_execute_gas_cost",
-            };
-            let sql_query_str = format!(
-                "
-                SELECT COALESCE(SUM({}),0) as sum FROM l1_batches
-                WHERE number BETWEEN {} AND {}
-                ",
-                column_name, from_block, to_block
-            );
-            sqlx::query(&sql_query_str)
-                .fetch_one(self.storage.conn())
-                .await
-                .unwrap()
-                .get::<BigDecimal, &str>("sum")
-                .to_u32()
-                .expect("Sum of predicted gas costs should fit into u32")
-        })
+        let column_name = match op_type {
+            AggregatedActionType::CommitBlocks => "predicted_commit_gas_cost",
+            AggregatedActionType::PublishProofBlocksOnchain => "predicted_prove_gas_cost",
+            AggregatedActionType::ExecuteBlocks => "predicted_execute_gas_cost",
+        };
+        let sql_query_str = format!(
+            "SELECT COALESCE(SUM({column_name}), 0) AS sum FROM l1_batches \
+            WHERE number BETWEEN $1 AND $2"
+        );
+        sqlx::query(&sql_query_str)
+            .bind(from_block.0 as i64)
+            .bind(to_block.0 as i64)
+            .fetch_one(self.storage.conn())
+            .await
+            .unwrap()
+            .get::<BigDecimal, &str>("sum")
+            .to_u32()
+            .expect("Sum of predicted gas costs should fit into u32")
     }

-    pub fn update_predicted_block_commit_gas(
+    pub async fn update_predicted_block_commit_gas(
         &mut self,
         l1_batch_number: L1BatchNumber,
         predicted_gas_cost: u32,
     ) {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "
-                UPDATE l1_batches
-                SET predicted_commit_gas_cost = $2, updated_at = now()
-                WHERE number = $1
-                ",
-                l1_batch_number.0 as i64,
-                predicted_gas_cost as i64
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+        sqlx::query!(
+            "UPDATE l1_batches \
+            SET predicted_commit_gas_cost = $2, updated_at = now() \
+            WHERE number = $1",
+            l1_batch_number.0 as i64,
+            predicted_gas_cost as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
     }

-    pub fn get_miniblock_range_of_l1_batch(
+    pub async fn get_miniblock_range_of_l1_batch(
         &mut self,
         l1_batch_number: L1BatchNumber,
     ) -> Option<(MiniblockNumber, MiniblockNumber)> {
-        async_std::task::block_on(async {
-            let row = sqlx::query!(
-                r#"
-                SELECT MIN(miniblocks.number) as "min?", MAX(miniblocks.number) as "max?"
-                FROM miniblocks
-                WHERE l1_batch_number = $1
-                "#,
-                l1_batch_number.0 as i64
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap();
-            match (row.min, row.max) {
-                (Some(min), Some(max)) => {
-                    Some((MiniblockNumber(min as u32), MiniblockNumber(max as u32)))
-                }
-                (None, None) => None,
-                _ => unreachable!(),
-            }
-        })
+        let row = sqlx::query!(
+            "SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\" \
+            FROM miniblocks \
+            WHERE l1_batch_number = $1",
+            l1_batch_number.0 as i64
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap();
+
+        Some((
+            MiniblockNumber(row.min? as u32),
+            MiniblockNumber(row.max? as u32),
+        ))
     }
 
     /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored miniblock that isn't assigned
     /// to any batch yet).
-    pub fn pending_batch_exists(&mut self) -> bool {
-        async_std::task::block_on(async {
-            let count = sqlx::query_scalar!(
-                r#"SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL"#
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap()
-            .unwrap_or(0);
-            count != 0
-        })
+    pub async fn pending_batch_exists(&mut self) -> bool {
+        let count = sqlx::query_scalar!(
+            "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL"
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap()
+        .unwrap_or(0);
+
+        count != 0
     }
 
-    pub fn get_last_l1_batch_number_with_witness_inputs(&mut self) -> L1BatchNumber {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                r#"
-                SELECT MAX(l1_batch_number) FROM witness_inputs
-                WHERE merkel_tree_paths_blob_url IS NOT NULL
-                "#,
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap()
-            .max
+    pub async fn get_last_l1_batch_number_with_witness_inputs(&mut self) -> L1BatchNumber {
+        let row = sqlx::query!(
+            "SELECT MAX(l1_batch_number) FROM witness_inputs \
+            WHERE merkel_tree_paths_blob_url IS NOT NULL",
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap();
+
+        row.max
             .map(|l1_batch_number| L1BatchNumber(l1_batch_number as u32))
             .unwrap_or_default()
-        })
     }
 
-    pub fn get_l1_batches_with_blobs_in_db(&mut self, limit: u8) -> Vec<L1BatchNumber> {
-        async_std::task::block_on(async {
-            let l1_batches = sqlx::query!(
-                r#"
-                SELECT l1_batch_number FROM witness_inputs
-                WHERE length(merkle_tree_paths) <> 0
-                ORDER BY l1_batch_number DESC
-                LIMIT $1;
-                "#,
-                limit as i32
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap();
-            l1_batches
-                .into_iter()
-                .map(|row| L1BatchNumber(row.l1_batch_number as u32))
-                .collect()
-        })
-    }
+    pub async fn get_l1_batches_with_blobs_in_db(&mut self, limit: u8) -> Vec<L1BatchNumber> {
+        let rows = sqlx::query!(
+            "SELECT l1_batch_number FROM witness_inputs \
+            WHERE length(merkle_tree_paths) <> 0 \
+            ORDER BY l1_batch_number DESC \
+            LIMIT $1",
+            limit as i32
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
 
-    pub fn purge_blobs_from_db(&mut self, l1_batches: Vec<L1BatchNumber>) {
-        let l1_batches: Vec<i64> = l1_batches
-            .iter()
-            .map(|l1_batch| l1_batch.0 as i64)
-            .collect();
-        async_std::task::block_on(async {
-            sqlx::query!(
-                r#"
-                UPDATE witness_inputs
-                SET merkle_tree_paths=''
-                WHERE l1_batch_number = ANY($1);
-                "#,
-                &l1_batches[..]
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+        rows.into_iter()
+            .map(|row| L1BatchNumber(row.l1_batch_number as u32))
+            .collect()
     }
 
-    pub fn get_merkle_tree_paths_blob_urls_to_be_cleaned(
+    pub async fn get_merkle_tree_paths_blob_urls_to_be_cleaned(
         &mut self,
         limit: u8,
     ) -> Vec<(i64, String)> {
-        async_std::task::block_on(async {
-            let job_ids = sqlx::query!(
-                r#"
-                SELECT l1_batch_number, merkel_tree_paths_blob_url FROM witness_inputs
-                WHERE status='successful' AND is_blob_cleaned=FALSE
-                AND merkel_tree_paths_blob_url is NOT NULL
-                AND updated_at < NOW() - INTERVAL '30 days'
-                LIMIT $1;
-                "#,
-                limit as i32
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap();
-            job_ids
-                .into_iter()
-                .map(|row| (row.l1_batch_number, row.merkel_tree_paths_blob_url.unwrap()))
-                .collect()
-        })
+        let rows = sqlx::query!(
+            "SELECT l1_batch_number, merkel_tree_paths_blob_url \
+            FROM witness_inputs \
+            WHERE status = 'successful' AND is_blob_cleaned = FALSE \
+            AND merkel_tree_paths_blob_url is NOT NULL \
+            AND updated_at < NOW() - INTERVAL '30 days' \
+            LIMIT $1",
+            limit as i32
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
+
+        rows.into_iter()
+            .map(|row| (row.l1_batch_number, row.merkel_tree_paths_blob_url.unwrap()))
+            .collect()
     }
 
-    pub fn mark_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                r#"
-                UPDATE witness_inputs
-                SET is_blob_cleaned=TRUE
-                WHERE l1_batch_number = ANY($1);
-                "#,
-                &l1_batch_numbers[..]
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+    pub async fn mark_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: &[i64]) {
+        sqlx::query!(
+            "UPDATE witness_inputs \
+            SET is_blob_cleaned = TRUE \
+            WHERE l1_batch_number = ANY($1)",
+            l1_batch_numbers
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    // Methods used for measuring Eth tx stage transition latencies
+    // and emitting metrics based on the measured data.
+    pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> Option<u64> {
+        sqlx::query!(
+            "SELECT timestamp FROM l1_batches \
+            WHERE eth_commit_tx_id IS NULL AND number > 0 \
+            ORDER BY number LIMIT 1",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| row.timestamp as u64)
+    }
+
+    pub async fn oldest_unproved_batch_timestamp(&mut self) -> Option<u64> {
+        sqlx::query!(
+            "SELECT timestamp FROM l1_batches \
+            WHERE eth_prove_tx_id IS NULL AND number > 0 \
+            ORDER BY number LIMIT 1",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| row.timestamp as u64)
+    }
+
+    pub async fn oldest_unexecuted_batch_timestamp(&mut self) -> Option<u64> {
+        sqlx::query!(
+            "SELECT timestamp FROM l1_batches \
+            WHERE eth_execute_tx_id IS NULL AND number > 0 \
+            ORDER BY number LIMIT 1",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| row.timestamp as u64)
+    }
 }
 
+/// These functions should only be used for tests.
 impl BlocksDal<'_, '_> {
-    // This function is only used for tests.
     // The actual l1 batch hash is only set by the metadata calculator.
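+    /// A sketch of the intended test-only usage; the `pool` handle and the
+    /// hash value here are assumed for illustration:
+    ///
+    /// ```ignore
+    /// let mut conn = pool.access_storage().await;
+    /// conn.blocks_dal()
+    ///     .set_l1_batch_hash(L1BatchNumber(1), H256::repeat_byte(0x42))
+    ///     .await;
+    /// ```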
- pub fn set_l1_batch_hash(&mut self, batch_num: L1BatchNumber, hash: H256) { - async_std::task::block_on(async { - sqlx::query!( - " - UPDATE l1_batches - SET hash = $1 - WHERE number = $2 - ", - hash.as_bytes(), - batch_num.0 as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - }) + pub async fn set_l1_batch_hash(&mut self, batch_num: L1BatchNumber, hash: H256) { + sqlx::query!( + "UPDATE l1_batches SET hash = $1 WHERE number = $2", + hash.as_bytes(), + batch_num.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + /// Deletes all miniblocks and L1 batches, including the genesis ones. Should only be used in tests. + pub async fn delete_genesis(&mut self) { + self.delete_miniblocks_inner(None).await; + self.delete_l1_batches_inner(None).await; + } +} + +#[cfg(test)] +mod tests { + use db_test_macro::db_test; + use zksync_contracts::BaseSystemContractsHashes; + use zksync_types::Address; + + use super::*; + use crate::ConnectionPool; + + #[db_test(dal_crate)] + async fn getting_predicted_gas(pool: ConnectionPool) { + let mut conn = pool.access_storage().await; + conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await; + + let mut header = L1BatchHeader::new( + L1BatchNumber(1), + 100, + Address::default(), + BaseSystemContractsHashes::default(), + ); + let mut predicted_gas = BlockGasCount { + commit: 2, + prove: 3, + execute: 10, + }; + conn.blocks_dal() + .insert_l1_batch(&header, predicted_gas) + .await; + + header.number = L1BatchNumber(2); + header.timestamp += 100; + predicted_gas += predicted_gas; + conn.blocks_dal() + .insert_l1_batch(&header, predicted_gas) + .await; + + let action_types_and_predicted_gas = [ + (AggregatedActionType::ExecuteBlocks, 10), + (AggregatedActionType::CommitBlocks, 2), + (AggregatedActionType::PublishProofBlocksOnchain, 3), + ]; + for (action_type, expected_gas) in action_types_and_predicted_gas { + let gas = conn + .blocks_dal() + .get_blocks_predicted_gas(L1BatchNumber(1), L1BatchNumber(1), action_type) + .await; + assert_eq!(gas, expected_gas); + + let gas = conn + .blocks_dal() + .get_blocks_predicted_gas(L1BatchNumber(2), L1BatchNumber(2), action_type) + .await; + assert_eq!(gas, 2 * expected_gas); + + let gas = conn + .blocks_dal() + .get_blocks_predicted_gas(L1BatchNumber(1), L1BatchNumber(2), action_type) + .await; + assert_eq!(gas, 3 * expected_gas); + } } } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 6b8a16cac9f1..1321695cfa34 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,450 +1,500 @@ -use crate::models::storage_block::{ - bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql, -}; -use crate::models::storage_transaction::{extract_web3_transaction, web3_transaction_select_sql}; -use crate::SqlxError; -use crate::StorageProcessor; use bigdecimal::BigDecimal; -use sqlx::postgres::PgArguments; -use sqlx::query::Query; -use sqlx::{Postgres, Row}; +use sqlx::Row; + use std::time::Instant; -use vm::utils::BLOCK_GAS_LIMIT; -use zksync_config::constants::EMPTY_UNCLES_HASH; -use crate::models::storage_transaction::CallTrace; -use zksync_types::api::{self, Block, BlockId, TransactionVariant}; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::vm_trace::Call; -use zksync_types::web3::types::{BlockHeader, U64}; -use zksync_types::{L1BatchNumber, L2ChainId, MiniblockNumber, H160, H256, U256}; +use zksync_config::constants::EMPTY_UNCLES_HASH; +use zksync_types::{ + api, + 
l2_to_l1_log::L2ToL1Log,
+    vm_trace::Call,
+    web3::types::{BlockHeader, U64},
+    zk_evm::zkevm_opcode_defs::system_params,
+    Bytes, L1BatchNumber, L2ChainId, MiniblockNumber, H160, H2048, H256, U256,
+};
 use zksync_utils::{bigdecimal_to_u256, miniblock_hash};
-use zksync_web3_decl::error::Web3Error;
+
+use crate::models::{
+    storage_block::{bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql},
+    storage_transaction::{extract_web3_transaction, web3_transaction_select_sql, CallTrace},
+};
+use crate::{SqlxError, StorageProcessor};
+
+const BLOCK_GAS_LIMIT: u32 = system_params::VM_INITIAL_FRAME_ERGS;
 
 #[derive(Debug)]
 pub struct BlocksWeb3Dal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }
 
 impl BlocksWeb3Dal<'_, '_> {
-    pub fn get_sealed_miniblock_number(&mut self) -> Result<MiniblockNumber, SqlxError> {
-        async_std::task::block_on(async {
-            let started_at = Instant::now();
-            let number: i64 = sqlx::query!(r#"SELECT MAX(number) as "number" FROM miniblocks"#)
-                .fetch_one(self.storage.conn())
-                .await?
-                .number
-                .expect("DAL invocation before genesis");
-            metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number");
-            Ok(MiniblockNumber(number as u32))
-        })
+    pub async fn get_sealed_miniblock_number(&mut self) -> Result<MiniblockNumber, SqlxError> {
+        let started_at = Instant::now();
+        let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM miniblocks")
+            .fetch_one(self.storage.conn())
+            .await?
+            .number
+            .expect("DAL invocation before genesis");
+        metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number");
+        Ok(MiniblockNumber(number as u32))
     }
 
-    pub fn get_sealed_l1_batch_number(&mut self) -> Result<L1BatchNumber, SqlxError> {
-        async_std::task::block_on(async {
-            let started_at = Instant::now();
-            let number: i64 = sqlx::query!(r#"SELECT MAX(number) as "number" FROM l1_batches"#)
-                .fetch_one(self.storage.conn())
-                .await?
-                .number
-                .expect("DAL invocation before genesis");
-            metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number");
-            Ok(L1BatchNumber(number as u32))
-        })
+    pub async fn get_sealed_l1_batch_number(&mut self) -> Result<L1BatchNumber, SqlxError> {
+        let started_at = Instant::now();
+        let number: i64 = sqlx::query!("SELECT MAX(number) as \"number\" FROM l1_batches")
+            .fetch_one(self.storage.conn())
+            .await?
+ .number + .expect("DAL invocation before genesis"); + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_sealed_block_number"); + Ok(L1BatchNumber(number as u32)) } - pub fn get_block_by_web3_block_id( + pub async fn get_block_by_web3_block_id( &mut self, - block_id: BlockId, + block_id: api::BlockId, include_full_transactions: bool, chain_id: L2ChainId, - ) -> Result>, SqlxError> { - async_std::task::block_on(async { - let transactions_sql = if include_full_transactions { - web3_transaction_select_sql() - } else { - "transactions.hash as tx_hash" - }; - - let query = format!( - " - SELECT - miniblocks.hash as block_hash, - miniblocks.number, - miniblocks.l1_batch_number, - miniblocks.timestamp, - miniblocks.base_fee_per_gas, - l1_batches.timestamp as l1_batch_timestamp, - transactions.gas_limit as gas_limit, - transactions.refunded_gas as refunded_gas, - {} - FROM miniblocks - LEFT JOIN l1_batches - ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN transactions - ON transactions.miniblock_number = miniblocks.number - WHERE {} - ", - transactions_sql, - web3_block_where_sql(block_id, 1) - ); - - let query: Query = - bind_block_where_sql_params(block_id, sqlx::query(&query)); - - let block = query - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .fold( - Option::>::None, - |prev_block, db_row| { - let mut block: Block = prev_block.unwrap_or({ - // This code will be only executed for the first row in the DB response. - // All other rows will only be used to extract relevant transactions. - let hash = db_row - .try_get("block_hash") - .map(H256::from_slice) - .unwrap_or_else(|_| H256::zero()); - let number = U64::from(db_row.get::("number")); - let l1_batch_number = db_row - .try_get::("l1_batch_number") - .map(U64::from) - .ok(); - let l1_batch_timestamp = db_row - .try_get::("l1_batch_timestamp") - .map(U256::from) - .ok(); - let parent_hash = match number.as_u32() { - 0 => H256::zero(), - number => miniblock_hash(MiniblockNumber(number - 1)), - }; - - Block { - hash, - parent_hash, - uncles_hash: EMPTY_UNCLES_HASH, - author: H160::zero(), - state_root: H256::zero(), - transactions_root: H256::zero(), - receipts_root: H256::zero(), - number, - l1_batch_number, - gas_used: Default::default(), - gas_limit: BLOCK_GAS_LIMIT.into(), - base_fee_per_gas: bigdecimal_to_u256( - db_row.get::("base_fee_per_gas"), - ), - extra_data: Default::default(), - // todo logs - logs_bloom: Default::default(), - timestamp: U256::from(db_row.get::("timestamp")), - l1_batch_timestamp, - difficulty: Default::default(), - total_difficulty: Default::default(), - seal_fields: vec![], - uncles: vec![], - transactions: Vec::default(), - size: Default::default(), - mix_hash: Default::default(), - nonce: Default::default(), - } - }); - if db_row.try_get::<&[u8], &str>("tx_hash").is_ok() { - let tx_gas_limit: U256 = - bigdecimal_to_u256(db_row.get::("gas_limit")); - let tx_refunded_gas: U256 = - ((db_row.get::("refunded_gas")) as u32).into(); - - block.gas_used += tx_gas_limit - tx_refunded_gas; - let tx = if include_full_transactions { - TransactionVariant::Full(extract_web3_transaction(db_row, chain_id)) - } else { - TransactionVariant::Hash(H256::from_slice(db_row.get("tx_hash"))) - }; - block.transactions.push(tx); - } - Some(block) - }, - ); - Ok(block) - }) + ) -> Result>, SqlxError> { + let transactions_sql = if include_full_transactions { + web3_transaction_select_sql() + } else { + "transactions.hash as tx_hash" + }; + + let query = format!( + "SELECT + 
miniblocks.hash as block_hash, + miniblocks.number, + miniblocks.l1_batch_number, + miniblocks.timestamp, + miniblocks.base_fee_per_gas, + l1_batches.timestamp as l1_batch_timestamp, + transactions.gas_limit as gas_limit, + transactions.refunded_gas as refunded_gas, + {} + FROM miniblocks + LEFT JOIN l1_batches + ON l1_batches.number = miniblocks.l1_batch_number + LEFT JOIN transactions + ON transactions.miniblock_number = miniblocks.number + WHERE {}", + transactions_sql, + web3_block_where_sql(block_id, 1) + ); + + let query = bind_block_where_sql_params(&block_id, sqlx::query(&query)); + let rows = query.fetch_all(self.storage.conn()).await?.into_iter(); + + let block = rows.fold(None, |prev_block, db_row| { + let mut block = prev_block.unwrap_or_else(|| { + // This code will be only executed for the first row in the DB response. + // All other rows will only be used to extract relevant transactions. + let hash = db_row + .try_get("block_hash") + .map_or_else(|_| H256::zero(), H256::from_slice); + let number = U64::from(db_row.get::("number")); + let l1_batch_number = db_row + .try_get::("l1_batch_number") + .map(U64::from) + .ok(); + let l1_batch_timestamp = db_row + .try_get::("l1_batch_timestamp") + .map(U256::from) + .ok(); + let parent_hash = match number.as_u32() { + 0 => H256::zero(), + number => miniblock_hash(MiniblockNumber(number - 1)), + }; + let base_fee_per_gas = db_row.get::("base_fee_per_gas"); + + api::Block { + hash, + parent_hash, + uncles_hash: EMPTY_UNCLES_HASH, + number, + l1_batch_number, + gas_limit: BLOCK_GAS_LIMIT.into(), + base_fee_per_gas: bigdecimal_to_u256(base_fee_per_gas), + timestamp: db_row.get::("timestamp").into(), + l1_batch_timestamp, + ..api::Block::default() + } + }); + if db_row.try_get::<&[u8], &str>("tx_hash").is_ok() { + let tx_gas_limit = bigdecimal_to_u256(db_row.get::("gas_limit")); + let tx_refunded_gas = U256::from((db_row.get::("refunded_gas")) as u32); + + block.gas_used += tx_gas_limit - tx_refunded_gas; + let tx = if include_full_transactions { + let tx = extract_web3_transaction(db_row, chain_id); + api::TransactionVariant::Full(tx) + } else { + api::TransactionVariant::Hash(H256::from_slice(db_row.get("tx_hash"))) + }; + block.transactions.push(tx); + } + Some(block) + }); + Ok(block) } - pub fn get_block_tx_count(&mut self, block_id: BlockId) -> Result, SqlxError> { - async_std::task::block_on(async { - let query = format!( - "SELECT l1_tx_count + l2_tx_count as tx_count FROM miniblocks WHERE {}", - web3_block_where_sql(block_id, 1) - ); - let query: Query = - bind_block_where_sql_params(block_id, sqlx::query(&query)); - - let tx_count: Option = query - .fetch_optional(self.storage.conn()) - .await? - .map(|db_row| db_row.get("tx_count")); - - Ok(tx_count.map(|t| (t as u32).into())) - }) + pub async fn get_block_tx_count( + &mut self, + block_id: api::BlockId, + ) -> Result, SqlxError> { + let query = format!( + "SELECT l1_tx_count + l2_tx_count as tx_count FROM miniblocks WHERE {}", + web3_block_where_sql(block_id, 1) + ); + let query = bind_block_where_sql_params(&block_id, sqlx::query(&query)); + + let tx_count: Option = query + .fetch_optional(self.storage.conn()) + .await? + .map(|db_row| db_row.get("tx_count")); + Ok(tx_count.map(|t| (t as u32).into())) } /// Returns hashes of blocks with numbers greater than `from_block` and the number of the last block. 
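+    ///
+    /// A hedged polling sketch; the cursor variable, the limit of 100 and the
+    /// `notify_subscribers` helper are illustrative assumptions, not part of
+    /// this change:
+    ///
+    /// ```ignore
+    /// let (hashes, last_number) = storage
+    ///     .blocks_web3_dal()
+    ///     .get_block_hashes_after(last_seen_block, 100)
+    ///     .await?;
+    /// notify_subscribers(&hashes); // placeholder for whatever consumes the hashes
+    /// if let Some(number) = last_number {
+    ///     last_seen_block = number; // advance the cursor for the next poll
+    /// }
+    /// ```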
- pub fn get_block_hashes_after( + pub async fn get_block_hashes_after( &mut self, from_block: MiniblockNumber, limit: usize, ) -> Result<(Vec, Option), SqlxError> { - async_std::task::block_on(async { - let records = sqlx::query!( - " - SELECT number, hash FROM miniblocks - WHERE number > $1 - ORDER BY number ASC - LIMIT $2 - ", - from_block.0 as i64, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await?; - let last_block_number = records - .last() - .map(|record| MiniblockNumber(record.number as u32)); - let hashes = records - .into_iter() - .map(|record| H256::from_slice(&record.hash)) - .collect(); - Ok((hashes, last_block_number)) - }) + let rows = sqlx::query!( + "SELECT number, hash FROM miniblocks \ + WHERE number > $1 \ + ORDER BY number ASC \ + LIMIT $2", + from_block.0 as i64, + limit as i32 + ) + .fetch_all(self.storage.conn()) + .await?; + + let last_block_number = rows.last().map(|row| MiniblockNumber(row.number as u32)); + let hashes = rows.iter().map(|row| H256::from_slice(&row.hash)).collect(); + Ok((hashes, last_block_number)) } /// Returns hashes of blocks with numbers greater than `from_block` and the number of the last block. - pub fn get_block_headers_after( + pub async fn get_block_headers_after( &mut self, from_block: MiniblockNumber, ) -> Result, SqlxError> { - async_std::task::block_on(async { - let records = sqlx::query!( - " - SELECT - hash, - number, - timestamp - FROM miniblocks - WHERE number > $1 - ORDER BY number ASC - ", - from_block.0 as i64, - ) - .fetch_all(self.storage.conn()) - .await?; - let blocks: Vec = records - .into_iter() - .map(|db_row| BlockHeader { - hash: Some(H256::from_slice(&db_row.hash)), - parent_hash: H256::zero(), - uncles_hash: EMPTY_UNCLES_HASH, - author: H160::zero(), - state_root: H256::zero(), - transactions_root: H256::zero(), - receipts_root: H256::zero(), - number: Some(U64::from(db_row.number)), - gas_used: Default::default(), - gas_limit: Default::default(), - base_fee_per_gas: Default::default(), - extra_data: Default::default(), - // todo logs - logs_bloom: Default::default(), - timestamp: U256::from(db_row.timestamp), - difficulty: Default::default(), - mix_hash: None, - nonce: None, - }) - .collect(); - Ok(blocks) - }) + let rows = sqlx::query!( + "SELECT hash, number, timestamp \ + FROM miniblocks \ + WHERE number > $1 \ + ORDER BY number ASC", + from_block.0 as i64, + ) + .fetch_all(self.storage.conn()) + .await?; + + let blocks = rows.into_iter().map(|row| BlockHeader { + hash: Some(H256::from_slice(&row.hash)), + parent_hash: H256::zero(), + uncles_hash: EMPTY_UNCLES_HASH, + author: H160::zero(), + state_root: H256::zero(), + transactions_root: H256::zero(), + receipts_root: H256::zero(), + number: Some(U64::from(row.number)), + gas_used: U256::zero(), + gas_limit: U256::zero(), + base_fee_per_gas: None, + extra_data: Bytes::default(), + logs_bloom: H2048::default(), + timestamp: U256::from(row.timestamp), + difficulty: U256::zero(), + mix_hash: None, + nonce: None, + }); + Ok(blocks.collect()) } - pub fn resolve_block_id( + pub async fn resolve_block_id( &mut self, - block_id: BlockId, - ) -> Result, SqlxError> { - async_std::task::block_on(async { - let query_string = match block_id { - BlockId::Hash(_) => "SELECT number FROM miniblocks WHERE hash = $1".to_string(), - BlockId::Number(api::BlockNumber::Number(block_number)) => { - // The reason why instead of returning the `block_number` directly we use query is - // to handle numbers of blocks that are not created yet. 
-                    // the `SELECT number FROM miniblocks WHERE number=block_number` for
-                    // non-existing block number will returns zero.
-                    format!(
-                        "SELECT number FROM miniblocks WHERE number = {}",
-                        block_number
-                    )
-                }
-                BlockId::Number(api::BlockNumber::Earliest) => {
-                    return Ok(Ok(MiniblockNumber(0)));
-                }
-                BlockId::Number(block_number) => web3_block_number_to_sql(block_number),
-            };
-            let row = bind_block_where_sql_params(block_id, sqlx::query(&query_string))
-                .fetch_optional(self.storage.conn())
-                .await?;
-
-            let block_number = row
-                .and_then(|row| row.get::<Option<i64>, &str>("number"))
-                .map(|n| MiniblockNumber(n as u32))
-                .ok_or(Web3Error::NoBlock);
-            Ok(block_number)
-        })
+        block_id: api::BlockId,
+    ) -> Result<Option<MiniblockNumber>, SqlxError> {
+        let query_string = match block_id {
+            api::BlockId::Hash(_) => "SELECT number FROM miniblocks WHERE hash = $1".to_owned(),
+            api::BlockId::Number(api::BlockNumber::Number(_)) => {
+                // We use a query instead of returning the `block_number` directly in order
+                // to handle block numbers that are not created yet:
+                // `SELECT number FROM miniblocks WHERE number = block_number`
+                // returns zero rows for a non-existing block number.
+                "SELECT number FROM miniblocks WHERE number = $1".to_owned()
+            }
+            api::BlockId::Number(api::BlockNumber::Earliest) => {
+                return Ok(Some(MiniblockNumber(0)));
+            }
+            api::BlockId::Number(block_number) => web3_block_number_to_sql(block_number),
+        };
+        let row = bind_block_where_sql_params(&block_id, sqlx::query(&query_string))
+            .fetch_optional(self.storage.conn())
+            .await?;
+
+        let block_number = row
+            .and_then(|row| row.get::<Option<i64>, &str>("number"))
+            .map(|n| MiniblockNumber(n as u32));
+        Ok(block_number)
     }
 
-    pub fn get_block_timestamp(
+    pub async fn get_block_timestamp(
         &mut self,
         block_number: MiniblockNumber,
     ) -> Result<Option<u64>, SqlxError> {
-        async_std::task::block_on(async {
-            let timestamp = sqlx::query!(
-                r#"SELECT timestamp FROM miniblocks WHERE number = $1"#,
-                block_number.0 as i64
-            )
-            .fetch_optional(self.storage.conn())
-            .await?
-            .map(|row| row.timestamp as u64);
-            Ok(timestamp)
-        })
+        let timestamp = sqlx::query!(
+            "SELECT timestamp FROM miniblocks WHERE number = $1",
+            block_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await?
+        .map(|row| row.timestamp as u64);
+        Ok(timestamp)
     }
 
-    pub fn get_l2_to_l1_logs(
+    pub async fn get_l2_to_l1_logs(
         &mut self,
         block_number: L1BatchNumber,
     ) -> Result<Vec<L2ToL1Log>, SqlxError> {
-        async_std::task::block_on(async {
-            let result: Vec<Vec<u8>> = sqlx::query!(
-                "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1",
-                block_number.0 as i64
-            )
-            .fetch_optional(self.storage.conn())
-            .await?
-            .map(|row| row.l2_to_l1_logs)
-            .unwrap_or_else(Vec::new);
+        let raw_logs = sqlx::query!(
+            "SELECT l2_to_l1_logs FROM l1_batches WHERE number = $1",
+            block_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await?
+        .map(|row| row.l2_to_l1_logs)
+        .unwrap_or_default();
 
-            Ok(result.into_iter().map(L2ToL1Log::from).collect())
-        })
+        Ok(raw_logs
+            .into_iter()
+            .map(|bytes| L2ToL1Log::from_slice(&bytes))
+            .collect())
     }
 
-    pub fn get_l1_batch_number_of_miniblock(
+    pub async fn get_l1_batch_number_of_miniblock(
         &mut self,
         miniblock_number: MiniblockNumber,
     ) -> Result<Option<L1BatchNumber>, SqlxError> {
-        async_std::task::block_on(async {
-            let number: Option<i64> = sqlx::query!(
-                "
-                SELECT l1_batch_number FROM miniblocks
-                WHERE number = $1
-                ",
-                miniblock_number.0 as i64
-            )
-            .fetch_optional(self.storage.conn())
-            .await?
- .and_then(|row| row.l1_batch_number); - Ok(number.map(|number| L1BatchNumber(number as u32))) - }) + let number: Option = sqlx::query!( + "SELECT l1_batch_number FROM miniblocks WHERE number = $1", + miniblock_number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await? + .and_then(|row| row.l1_batch_number); + + Ok(number.map(|number| L1BatchNumber(number as u32))) } - pub fn get_miniblock_range_of_l1_batch( + pub async fn get_miniblock_range_of_l1_batch( &mut self, l1_batch_number: L1BatchNumber, ) -> Result, SqlxError> { - async_std::task::block_on(async { - let row = sqlx::query!( - r#" - SELECT MIN(miniblocks.number) as "min?", MAX(miniblocks.number) as "max?" - FROM miniblocks - WHERE l1_batch_number = $1 - "#, - l1_batch_number.0 as i64 - ) - .fetch_one(self.storage.conn()) - .await?; - match (row.min, row.max) { - (Some(min), Some(max)) => Ok(Some(( - MiniblockNumber(min as u32), - MiniblockNumber(max as u32), - ))), - (None, None) => Ok(None), - _ => unreachable!(), + let row = sqlx::query!( + "SELECT MIN(miniblocks.number) as \"min?\", MAX(miniblocks.number) as \"max?\" \ + FROM miniblocks \ + WHERE l1_batch_number = $1", + l1_batch_number.0 as i64 + ) + .fetch_one(self.storage.conn()) + .await?; + + Ok(match (row.min, row.max) { + (Some(min), Some(max)) => { + Some((MiniblockNumber(min as u32), MiniblockNumber(max as u32))) } + (None, None) => None, + _ => unreachable!(), }) } - pub fn get_l1_batch_info_for_tx( + pub async fn get_l1_batch_info_for_tx( &mut self, tx_hash: H256, ) -> Result, SqlxError> { - async_std::task::block_on(async { - let row = sqlx::query!( - " - SELECT l1_batch_number, l1_batch_tx_index - FROM transactions - WHERE hash = $1 - ", - tx_hash.as_bytes() - ) - .fetch_optional(self.storage.conn()) - .await?; - let result = row.and_then(|row| match (row.l1_batch_number, row.l1_batch_tx_index) { - (Some(l1_batch_number), Some(l1_batch_tx_index)) => Some(( - L1BatchNumber(l1_batch_number as u32), - l1_batch_tx_index as u16, - )), - _ => None, - }); - Ok(result) - }) + let row = sqlx::query!( + "SELECT l1_batch_number, l1_batch_tx_index \ + FROM transactions \ + WHERE hash = $1", + tx_hash.as_bytes() + ) + .fetch_optional(self.storage.conn()) + .await?; + + let result = row.and_then(|row| match (row.l1_batch_number, row.l1_batch_tx_index) { + (Some(l1_batch_number), Some(l1_batch_tx_index)) => Some(( + L1BatchNumber(l1_batch_number as u32), + l1_batch_tx_index as u16, + )), + _ => None, + }); + Ok(result) } - pub fn get_trace_for_miniblock(&mut self, block: BlockId) -> Result, Web3Error> { - async_std::task::block_on(async { - let block_number = self.resolve_block_id(block).unwrap()?; - let traces = sqlx::query_as!( - CallTrace, - r#" - SELECT * FROM call_traces WHERE tx_hash IN ( - SELECT hash FROM transactions WHERE miniblock_number = $1 - ) - "#, - block_number.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(Call::from) - .collect(); - Ok(traces) - }) + pub async fn get_trace_for_miniblock(&mut self, block_number: MiniblockNumber) -> Vec { + sqlx::query_as!( + CallTrace, + "SELECT * FROM call_traces WHERE tx_hash IN \ + (SELECT hash FROM transactions WHERE miniblock_number = $1)", + block_number.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(Call::from) + .collect() } } #[cfg(test)] mod tests { - use crate::ConnectionPool; + use db_test_macro::db_test; + use zksync_types::{block::MiniblockHeader, MiniblockNumber}; use super::*; - use db_test_macro::db_test; - use 
zksync_types::{ - api::{BlockId, BlockNumber}, - MiniblockNumber, - }; + use crate::{tests::create_miniblock_header, ConnectionPool}; + + #[db_test(dal_crate)] + async fn getting_web3_block_and_tx_count(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + let header = MiniblockHeader { + l1_tx_count: 3, + l2_tx_count: 5, + ..create_miniblock_header(0) + }; + conn.blocks_dal().insert_miniblock(&header).await; + + let block_ids = [ + api::BlockId::Number(api::BlockNumber::Earliest), + api::BlockId::Number(api::BlockNumber::Latest), + api::BlockId::Number(api::BlockNumber::Number(0.into())), + api::BlockId::Hash(miniblock_hash(MiniblockNumber(0))), + ]; + for block_id in block_ids { + let block = conn + .blocks_web3_dal() + .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .await; + let block = block.unwrap().unwrap(); + assert!(block.transactions.is_empty()); + assert_eq!(block.number, U64::zero()); + assert_eq!(block.hash, miniblock_hash(MiniblockNumber(0))); + + let tx_count = conn.blocks_web3_dal().get_block_tx_count(block_id).await; + assert_eq!(tx_count.unwrap(), Some(8.into())); + } + + let non_existing_block_ids = [ + api::BlockId::Number(api::BlockNumber::Pending), + api::BlockId::Number(api::BlockNumber::Number(1.into())), + api::BlockId::Hash(miniblock_hash(MiniblockNumber(1))), + ]; + for block_id in non_existing_block_ids { + let block = conn + .blocks_web3_dal() + .get_block_by_web3_block_id(block_id, false, L2ChainId(270)) + .await; + assert!(block.unwrap().is_none()); + + let tx_count = conn.blocks_web3_dal().get_block_tx_count(block_id).await; + assert_eq!(tx_count.unwrap(), None); + } + } #[db_test(dal_crate)] - async fn test_resolve_block_id_earliest(connection_pool: ConnectionPool) { - let storage = &mut connection_pool.access_test_storage().await; - let mut block_web3_dal = BlocksWeb3Dal { storage }; - let miniblock_number = - block_web3_dal.resolve_block_id(BlockId::Number(BlockNumber::Earliest)); - assert_eq!(miniblock_number.unwrap().unwrap(), MiniblockNumber(0)); + async fn resolving_earliest_block_id(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + + let miniblock_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Earliest)) + .await; + assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(0))); + } + + #[db_test(dal_crate)] + async fn resolving_latest_block_id(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(0)) + .await; + + let miniblock_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest)) + .await; + assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(0))); + + let miniblock_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Number(0.into()))) + .await; + assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(0))); + let miniblock_number = conn + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Number(1.into()))) + .await; + assert_eq!(miniblock_number.unwrap(), None); + + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(1)) + .await; + let 
miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(1)));
+
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(2)));
+
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Number(1.into())))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(1)));
+    }
+
+    #[db_test(dal_crate)]
+    async fn resolving_block_by_hash(connection_pool: ConnectionPool) {
+        let mut conn = connection_pool.access_test_storage().await;
+        conn.blocks_dal()
+            .delete_miniblocks(MiniblockNumber(0))
+            .await;
+        conn.blocks_dal()
+            .insert_miniblock(&create_miniblock_header(0))
+            .await;
+
+        let hash = miniblock_hash(MiniblockNumber(0));
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Hash(hash))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(0)));
+
+        let hash = miniblock_hash(MiniblockNumber(1));
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Hash(hash))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), None);
     }
 }
diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs
index e11abe80edf8..9019d1213fb6 100644
--- a/core/lib/dal/src/connection/mod.rs
+++ b/core/lib/dal/src/connection/mod.rs
@@ -1,11 +1,12 @@
 // Built-in deps
 use std::time::{Duration, Instant};
 // External imports
-use async_std::task::{block_on, sleep};
 use sqlx::pool::PoolConnection;
 use sqlx::postgres::{PgPool, PgPoolOptions, Postgres};
 // Local imports
-use crate::{get_master_database_url, get_replica_database_url, StorageProcessor};
+use crate::{
+    get_master_database_url, get_prover_database_url, get_replica_database_url, StorageProcessor,
+};
 use zksync_utils::parse_env;
 
 pub use self::test_pool::TestPool;
@@ -19,28 +20,30 @@ pub enum ConnectionPool {
     Test(TestPool),
 }
 
+#[derive(Clone, Debug)]
+pub enum DbVariant {
+    Master,
+    Replica,
+    Prover,
+}
+
 impl ConnectionPool {
     /// Establishes a pool of the connections to the database and
     /// creates a new `ConnectionPool` object.
    /// `pool_max_size` is the number of connections in the pool; if it is not set, the "DATABASE_POOL_SIZE" env variable is used.
-    pub fn new(pool_max_size: Option<u32>, connect_to_master: bool) -> Self {
-        let database_url = if connect_to_master {
-            get_master_database_url()
-        } else {
-            get_replica_database_url()
+    pub async fn new(pool_max_size: Option<u32>, db: DbVariant) -> Self {
+        let database_url = match db {
+            DbVariant::Master => get_master_database_url(),
+            DbVariant::Replica => get_replica_database_url(),
+            DbVariant::Prover => get_prover_database_url(),
         };
         let max_connections = pool_max_size.unwrap_or_else(|| parse_env("DATABASE_POOL_SIZE"));
 
         let options = PgPoolOptions::new().max_connections(max_connections);
-        let pool = block_on(options.connect(&database_url)).unwrap();
+        let pool = options.connect(&database_url).await.unwrap();
         Self::Real(pool)
     }
 
-    /// WARNING: this method is intentionally private.
-    /// `zksync_dal` crate uses `async-std` runtime, whereas most of our crates use `tokio`.
-    /// Calling `async-std` future from `tokio` context may cause deadlocks (and it did happen).
-    /// Use blocking counterpart instead.
-    ///
     /// Creates a `StorageProcessor` entity over a recoverable connection.
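+    ///
+    /// A minimal construction sketch; the pool size and database variant are
+    /// illustrative:
+    ///
+    /// ```ignore
+    /// let pool = ConnectionPool::new(Some(10), DbVariant::Master).await;
+    /// let mut storage = pool.access_storage().await;
+    /// ```
+    ///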
/// Upon a database outage connection will block the thread until /// it will be able to recover the connection (or, if connection cannot @@ -49,12 +52,28 @@ impl ConnectionPool { /// /// This method is intended to be used in crucial contexts, where the /// database access is must-have (e.g. block committer). - async fn access_storage(&self) -> StorageProcessor<'_> { + pub async fn access_storage(&self) -> StorageProcessor<'_> { + self.access_storage_inner(None).await + } + + /// A version of `access_storage` that would also expose the duration of the connection + /// acquisition tagged to the `requester` name. + /// + /// WARN: This method should not be used if it will result in too many time series (e.g. + /// from witness generators or provers), otherwise Prometheus won't be able to handle it. + pub async fn access_storage_tagged(&self, requester: &'static str) -> StorageProcessor<'_> { + self.access_storage_inner(Some(requester)).await + } + + async fn access_storage_inner(&self, requester: Option<&'static str>) -> StorageProcessor<'_> { match self { ConnectionPool::Real(real_pool) => { let start = Instant::now(); let conn = Self::acquire_connection_retried(real_pool).await; metrics::histogram!("sql.connection_acquire", start.elapsed()); + if let Some(requester) = requester { + metrics::histogram!("sql.connection_acquire.tagged", start.elapsed(), "requester" => requester); + } StorageProcessor::from_pool(conn) } ConnectionPool::Test(test) => test.access_storage().await, @@ -78,17 +97,13 @@ impl ConnectionPool { // Backing off for one second if facing an error vlog::warn!("Failed to get connection to db. Backing off for 1 second"); - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } // Attempting to get the pooled connection for the last time pool.acquire().await.unwrap() } - pub fn access_storage_blocking(&self) -> StorageProcessor<'_> { - block_on(self.access_storage()) - } - pub async fn access_test_storage(&self) -> StorageProcessor<'static> { match self { ConnectionPool::Test(test) => test.access_storage().await, diff --git a/core/lib/dal/src/connection/test_pool.rs b/core/lib/dal/src/connection/test_pool.rs index 69fda8f6f8c7..7a00eea42201 100644 --- a/core/lib/dal/src/connection/test_pool.rs +++ b/core/lib/dal/src/connection/test_pool.rs @@ -1,11 +1,11 @@ // Built-in deps use std::{fmt, mem, pin::Pin, sync::Arc, time::Duration}; // External imports -use async_std::{ - future::timeout, - sync::{Mutex, MutexGuardArc}, -}; use sqlx::{Acquire, Connection, PgConnection, Postgres, Transaction}; +use tokio::{ + sync::{Mutex, OwnedMutexGuard}, + time::timeout, +}; // Local imports use crate::StorageProcessor; @@ -68,7 +68,7 @@ impl TestPoolInner { #[derive(Debug)] pub struct TestPoolLock { - lock: MutexGuardArc, + lock: OwnedMutexGuard, } impl TestPoolLock { @@ -114,7 +114,7 @@ impl TestPool { pub async fn access_storage(&self) -> StorageProcessor<'static> { const LOCK_TIMEOUT: Duration = Duration::from_secs(1); - let lock = self.inner.lock_arc(); + let lock = self.inner.clone().lock_owned(); let lock = timeout(LOCK_TIMEOUT, lock).await.expect( "Timed out waiting to acquire a lock in test `ConnectionPool`. 
\ Check the backtrace and make sure that no `StorageProcessor`s are alive", diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 0aa5b697e664..7505d0e0c939 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -14,12 +14,12 @@ use zksync_types::{Address, L1BatchNumber, H256, U256}; #[derive(Debug)] pub struct EthSenderDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl EthSenderDal<'_, '_> { - pub fn get_inflight_txs(&mut self) -> Vec { - async_std::task::block_on(async { + pub async fn get_inflight_txs(&mut self) -> Vec { + { let txs = sqlx::query_as!( StorageEthTx, "SELECT * FROM eth_txs WHERE confirmed_eth_tx_history_id IS NULL @@ -30,11 +30,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); txs.into_iter().map(|tx| tx.into()).collect() - }) + } } - pub fn get_eth_l1_batches(&mut self) -> L1BatchEthSenderStats { - async_std::task::block_on(async { + pub async fn get_eth_l1_batches(&mut self) -> L1BatchEthSenderStats { + { let mut stats = L1BatchEthSenderStats::default(); for tx_type in ["execute_tx", "commit_tx", "prove_tx"] { let mut records= sqlx::query(&format!( @@ -76,11 +76,11 @@ impl EthSenderDal<'_, '_> { } } stats - }) + } } - pub fn get_eth_tx(&mut self, eth_tx_id: u32) -> Option { - async_std::task::block_on(async { + pub async fn get_eth_tx(&mut self, eth_tx_id: u32) -> Option { + { sqlx::query_as!( StorageEthTx, "SELECT * FROM eth_txs WHERE id = $1", @@ -90,11 +90,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap() .map(Into::into) - }) + } } - pub fn get_new_eth_txs(&mut self, limit: u64) -> Vec { - async_std::task::block_on(async { + pub async fn get_new_eth_txs(&mut self, limit: u64) -> Vec { + { let txs = sqlx::query_as!( StorageEthTx, r#"SELECT * FROM eth_txs @@ -108,11 +108,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); txs.into_iter().map(|tx| tx.into()).collect() - }) + } } - pub fn get_unsent_txs(&mut self) -> Vec { - async_std::task::block_on(async { + pub async fn get_unsent_txs(&mut self) -> Vec { + { let txs = sqlx::query_as!( StorageTxHistoryToSend, r#" @@ -133,10 +133,10 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); txs.into_iter().map(|tx| tx.into()).collect() - }) + } } - pub fn save_eth_tx( + pub async fn save_eth_tx( &mut self, nonce: u64, raw_tx: Vec, @@ -144,7 +144,7 @@ impl EthSenderDal<'_, '_> { contract_address: Address, predicted_gas_cost: u32, ) -> EthTx { - async_std::task::block_on(async { + { let address = format!("{:#x}", contract_address); let eth_tx = sqlx::query_as!( StorageEthTx, @@ -161,10 +161,10 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); eth_tx.into() - }) + } } - pub fn insert_tx_history( + pub async fn insert_tx_history( &mut self, eth_tx_id: u32, base_fee_per_gas: u64, @@ -172,7 +172,7 @@ impl EthSenderDal<'_, '_> { tx_hash: H256, raw_signed_tx: Vec, ) -> Option { - async_std::task::block_on(async { + { let priority_fee_per_gas = i64::try_from(priority_fee_per_gas).expect("Can't convert U256 to i64"); let base_fee_per_gas = @@ -195,11 +195,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap() .map(|row| row.id as u32) - }) + } } - pub fn set_sent_at_block(&mut self, eth_txs_history_id: u32, sent_at_block: u32) { - async_std::task::block_on(async { + pub async fn set_sent_at_block(&mut self, eth_txs_history_id: u32, sent_at_block: u32) { + { sqlx::query!( "UPDATE eth_txs_history SET sent_at_block = $2, sent_at = now() WHERE id = $1 AND sent_at_block IS NULL", @@ -209,11 +209,11 @@ impl 
EthSenderDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn remove_tx_history(&mut self, eth_txs_history_id: u32) { - async_std::task::block_on(async { + pub async fn remove_tx_history(&mut self, eth_txs_history_id: u32) { + { sqlx::query!( "DELETE FROM eth_txs_history WHERE id = $1", @@ -222,11 +222,11 @@ impl EthSenderDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn confirm_tx(&mut self, tx_hash: H256, gas_used: U256) { - async_std::task::block_on(async { + pub async fn confirm_tx(&mut self, tx_hash: H256, gas_used: U256) { + { let mut transaction = self.storage.start_transaction().await; let gas_used = i64::try_from(gas_used).expect("Can't convert U256 to i64"); let tx_hash = format!("{:#x}", tx_hash); @@ -254,11 +254,11 @@ impl EthSenderDal<'_, '_> { .unwrap(); transaction.commit().await; - }) + } } - pub fn get_confirmed_tx_hash_by_eth_tx_id(&mut self, eth_tx_id: u32) -> Option { - async_std::task::block_on(async { + pub async fn get_confirmed_tx_hash_by_eth_tx_id(&mut self, eth_tx_id: u32) -> Option { + { let tx_hash = sqlx::query!( "SELECT tx_hash FROM eth_txs_history WHERE eth_tx_id = $1 AND confirmed_at IS NOT NULL", @@ -273,7 +273,7 @@ impl EthSenderDal<'_, '_> { let tx_hash = tx_hash.trim_start_matches("0x"); H256::from_str(tx_hash).unwrap() }) - }) + } } /// This method inserts a fake transaction into the database that would make the corresponding L1 batch @@ -286,14 +286,14 @@ impl EthSenderDal<'_, '_> { /// /// After this method is used anywhere in the codebase, it is considered a bug to try to directly query `eth_txs_history` /// or `eth_txs` tables. - pub fn insert_bogus_confirmed_eth_tx( + pub async fn insert_bogus_confirmed_eth_tx( &mut self, l1_batch: L1BatchNumber, tx_type: AggregatedActionType, tx_hash: H256, confirmed_at: DateTime, ) { - async_std::task::block_on(async { + { let mut transaction = self.storage.start_transaction().await; let tx_hash = format!("{:#x}", tx_hash); @@ -357,14 +357,15 @@ impl EthSenderDal<'_, '_> { super::BlocksDal { storage: &mut transaction, } - .set_eth_tx_id(l1_batch, l1_batch, eth_tx_id as u32, tx_type); + .set_eth_tx_id(l1_batch, l1_batch, eth_tx_id as u32, tx_type) + .await; transaction.commit().await; - }) + } } - pub fn get_tx_history_to_check(&mut self, eth_tx_id: u32) -> Vec { - async_std::task::block_on(async { + pub async fn get_tx_history_to_check(&mut self, eth_tx_id: u32) -> Vec { + { let tx_history = sqlx::query_as!( StorageTxHistory, "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY created_at DESC", @@ -374,11 +375,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); tx_history.into_iter().map(|tx| tx.into()).collect() - }) + } } - pub fn get_block_number_on_first_sent_attempt(&mut self, eth_tx_id: u32) -> Option { - async_std::task::block_on(async { + pub async fn get_block_number_on_first_sent_attempt(&mut self, eth_tx_id: u32) -> Option { + { let sent_at_block = sqlx::query_scalar!( "SELECT sent_at_block FROM eth_txs_history WHERE eth_tx_id = $1 AND sent_at_block IS NOT NULL ORDER BY created_at ASC LIMIT 1", eth_tx_id as i32 @@ -387,11 +388,11 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); sent_at_block.flatten().map(|block| block as u32) - }) + } } - pub fn get_last_sent_eth_tx(&mut self, eth_tx_id: u32) -> Option { - async_std::task::block_on(async { + pub async fn get_last_sent_eth_tx(&mut self, eth_tx_id: u32) -> Option { + { let history_item = sqlx::query_as!( StorageTxHistory, "SELECT * FROM eth_txs_history WHERE eth_tx_id = $1 ORDER BY 
created_at DESC LIMIT 1", @@ -401,22 +402,21 @@ impl EthSenderDal<'_, '_> { .await .unwrap(); history_item.map(|tx| tx.into()) - }) + } } - pub fn get_next_nonce(&mut self) -> Option { - async_std::task::block_on(async { - sqlx::query!(r#"SELECT MAX(nonce) as "max_nonce?" FROM eth_txs"#,) - .fetch_one(self.storage.conn()) + pub async fn get_next_nonce(&mut self) -> Option { + { + let row = sqlx::query!("SELECT nonce FROM eth_txs ORDER BY id DESC LIMIT 1") + .fetch_optional(self.storage.conn()) .await - .unwrap() - .max_nonce - .map(|n| n as u64 + 1) - }) + .unwrap(); + row.map(|row| row.nonce as u64 + 1) + } } - pub fn mark_failed_transaction(&mut self, eth_tx_id: u32) { - async_std::task::block_on(async { + pub async fn mark_failed_transaction(&mut self, eth_tx_id: u32) { + { sqlx::query!( "UPDATE eth_txs SET has_failed = TRUE WHERE id = $1", eth_tx_id as i32 @@ -424,22 +424,22 @@ impl EthSenderDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_number_of_failed_transactions(&mut self) -> i64 { - async_std::task::block_on(async { + pub async fn get_number_of_failed_transactions(&mut self) -> i64 { + { sqlx::query!("SELECT COUNT(*) FROM eth_txs WHERE has_failed = TRUE") .fetch_one(self.storage.conn()) .await .unwrap() .count .unwrap() - }) + } } - pub fn clear_failed_transactions(&mut self) { - async_std::task::block_on(async { + pub async fn clear_failed_transactions(&mut self) { + { sqlx::query!( "DELETE FROM eth_txs WHERE id >= (SELECT MIN(id) FROM eth_txs WHERE has_failed = TRUE)" @@ -447,6 +447,6 @@ impl EthSenderDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } } diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index f5c6f637cccb..82141315f03f 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,185 +1,351 @@ -use crate::StorageProcessor; use sqlx::types::chrono::Utc; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::{tx::IncludedTxLocation, MiniblockNumber, VmEvent}; + +use std::fmt; + +use crate::{models::storage_event::StorageL2ToL1Log, SqlxError, StorageProcessor}; +use zksync_types::{ + l2_to_l1_log::L2ToL1Log, tx::IncludedTxLocation, MiniblockNumber, VmEvent, H256, +}; + +/// Wrapper around an optional event topic allowing to hex-format it for `COPY` instructions. +#[derive(Debug)] +struct EventTopic<'a>(Option<&'a H256>); + +impl fmt::LowerHex for EventTopic<'_> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(topic) = self.0 { + fmt::LowerHex::fmt(topic, formatter) + } else { + Ok(()) // Don't write anything + } + } +} #[derive(Debug)] pub struct EventsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl EventsDal<'_, '_> { - pub fn save_events( + /// Saves events for the specified miniblock. 
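+    ///
+    /// A minimal usage sketch mirroring the tests below; the hash, address and
+    /// the `block_events` vector are placeholders, not part of this change:
+    ///
+    /// ```ignore
+    /// let location = IncludedTxLocation {
+    ///     tx_hash: H256::zero(),
+    ///     tx_index_in_miniblock: 0,
+    ///     tx_initiator_address: Address::default(),
+    /// };
+    /// let events: Vec<&VmEvent> = block_events.iter().collect();
+    /// conn.events_dal()
+    ///     .save_events(MiniblockNumber(1), &[(location, events)])
+    ///     .await;
+    /// ```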
+ pub async fn save_events( &mut self, block_number: MiniblockNumber, - all_block_events: Vec<(IncludedTxLocation, Vec)>, + all_block_events: &[(IncludedTxLocation, Vec<&VmEvent>)], ) { - async_std::task::block_on(async { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY events( - miniblock_number, tx_hash, tx_index_in_block, address, - event_index_in_block, event_index_in_tx, - topic1, topic2, topic3, topic4, value, - tx_initiator_address, - created_at, updated_at - ) - FROM STDIN WITH (DELIMITER '|')", - ) - .await - .unwrap(); - let mut bytes: Vec = Vec::new(); - let now = Utc::now().naive_utc().to_string(); - let mut event_index_in_block = 0u32; - let mut event_index_in_tx: u32; - for ( - IncludedTxLocation { - tx_hash, - tx_index_in_miniblock: tx_index_in_block, + let mut copy = self + .storage + .conn() + .copy_in_raw( + "COPY events( + miniblock_number, tx_hash, tx_index_in_block, address, + event_index_in_block, event_index_in_tx, + topic1, topic2, topic3, topic4, value, tx_initiator_address, - }, - events, - ) in all_block_events - { - event_index_in_tx = 0; - let tx_hash_str = format!("\\\\x{}", hex::encode(tx_hash.0)); - let tx_initiator_address_str = - format!("\\\\x{}", hex::encode(tx_initiator_address.0)); - for event in events { - let address_str = format!("\\\\x{}", hex::encode(event.address.0)); - let mut topics_str: Vec = event - .indexed_topics - .into_iter() - .map(|topic| format!("\\\\x{}", hex::encode(topic.0))) - .collect(); - topics_str.resize(4, "\\\\x".to_string()); - let value_str = format!("\\\\x{}", hex::encode(event.value)); - let row = format!( - "{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n", - block_number, - tx_hash_str, - tx_index_in_block, - address_str, - event_index_in_block, - event_index_in_tx, - topics_str[0], - topics_str[1], - topics_str[2], - topics_str[3], - value_str, - tx_initiator_address_str, - now, - now - ); - bytes.extend_from_slice(row.as_bytes()); - - event_index_in_block += 1; - event_index_in_tx += 1; - } - } - copy.send(bytes).await.unwrap(); - // note: all the time spent in this function is spent in `copy.finish()` - copy.finish().await.unwrap(); - }) - } - - pub fn rollback_events(&mut self, block_number: MiniblockNumber) { - async_std::task::block_on(async { - sqlx::query!( - "DELETE FROM events WHERE miniblock_number > $1", - block_number.0 as i64 + created_at, updated_at + ) + FROM STDIN WITH (DELIMITER '|')", ) - .execute(self.storage.conn()) .await .unwrap(); - }) + + let mut buffer = String::new(); + let now = Utc::now().naive_utc().to_string(); + let mut event_index_in_block = 0_u32; + for (tx_location, events) in all_block_events { + let IncludedTxLocation { + tx_hash, + tx_index_in_miniblock, + tx_initiator_address, + } = tx_location; + + for (event_index_in_tx, event) in events.iter().enumerate() { + write_str!( + &mut buffer, + r"{block_number}|\\x{tx_hash:x}|{tx_index_in_miniblock}|\\x{address:x}|", + address = event.address + ); + write_str!(&mut buffer, "{event_index_in_block}|{event_index_in_tx}|"); + write_str!( + &mut buffer, + r"\\x{topic0:x}|\\x{topic1:x}|\\x{topic2:x}|\\x{topic3:x}|", + topic0 = EventTopic(event.indexed_topics.get(0)), + topic1 = EventTopic(event.indexed_topics.get(1)), + topic2 = EventTopic(event.indexed_topics.get(2)), + topic3 = EventTopic(event.indexed_topics.get(3)) + ); + writeln_str!( + &mut buffer, + r"\\x{value}|\\x{tx_initiator_address:x}|{now}|{now}", + value = hex::encode(&event.value) + ); + + event_index_in_block += 1; + } + } + 
copy.send(buffer.as_bytes()).await.unwrap(); + // note: all the time spent in this function is spent in `copy.finish()` + copy.finish().await.unwrap(); + } + + /// Removes events with a block number strictly greater than the specified `block_number`. + pub async fn rollback_events(&mut self, block_number: MiniblockNumber) { + sqlx::query!( + "DELETE FROM events WHERE miniblock_number > $1", + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); } - pub fn save_l2_to_l1_logs( + /// Saves L2-to-L1 logs from a miniblock. Logs must be ordered by transaction location + /// and within each transaction. + pub async fn save_l2_to_l1_logs( &mut self, block_number: MiniblockNumber, - all_block_l2_to_l1_logs: Vec<(IncludedTxLocation, Vec)>, + all_block_l2_to_l1_logs: &[(IncludedTxLocation, Vec<&L2ToL1Log>)], ) { - async_std::task::block_on(async { - let mut copy = self - .storage - .conn() - .copy_in_raw( - "COPY l2_to_l1_logs( - miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, - tx_index_in_miniblock, tx_index_in_l1_batch, - shard_id, is_service, sender, key, value, - created_at, updated_at - ) - FROM STDIN WITH (DELIMITER '|')", + let mut copy = self + .storage + .conn() + .copy_in_raw( + "COPY l2_to_l1_logs( + miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, + tx_index_in_miniblock, tx_index_in_l1_batch, + shard_id, is_service, sender, key, value, + created_at, updated_at ) - .await - .unwrap(); - - let mut bytes: Vec = Vec::new(); - let now = Utc::now().naive_utc().to_string(); - let mut log_index_in_miniblock = 0u32; - let mut log_index_in_tx: u32; - for (tx_location, logs) in all_block_l2_to_l1_logs { - log_index_in_tx = 0; - let tx_hash_str = format!("\\\\x{}", hex::encode(tx_location.tx_hash.0)); - for log in logs { - let sender_str = format!("\\\\x{}", hex::encode(log.sender)); - let key_str = format!("\\\\x{}", hex::encode(log.key)); - let value_str = format!("\\\\x{}", hex::encode(log.value)); - let row = format!( - "{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\n", - block_number, - log_index_in_miniblock, - log_index_in_tx, - tx_hash_str, - tx_location.tx_index_in_miniblock, - log.tx_number_in_block, - log.shard_id, - log.is_service, - sender_str, - key_str, - value_str, - now, - now - ); - bytes.extend_from_slice(row.as_bytes()); - - log_index_in_miniblock += 1; - log_index_in_tx += 1; - } + FROM STDIN WITH (DELIMITER '|')", + ) + .await + .unwrap(); + + let mut buffer = String::new(); + let now = Utc::now().naive_utc().to_string(); + let mut log_index_in_miniblock = 0u32; + for (tx_location, logs) in all_block_l2_to_l1_logs { + let IncludedTxLocation { + tx_hash, + tx_index_in_miniblock, + .. 
+ } = tx_location; + + for (log_index_in_tx, log) in logs.iter().enumerate() { + let L2ToL1Log { + shard_id, + is_service, + tx_number_in_block, + sender, + key, + value, + } = log; + + write_str!( + &mut buffer, + r"{block_number}|{log_index_in_miniblock}|{log_index_in_tx}|\\x{tx_hash:x}|" + ); + write_str!( + &mut buffer, + r"{tx_index_in_miniblock}|{tx_number_in_block}|{shard_id}|{is_service}|" + ); + writeln_str!( + &mut buffer, + r"\\x{sender:x}|\\x{key:x}|\\x{value:x}|{now}|{now}" + ); + + log_index_in_miniblock += 1; } - copy.send(bytes).await.unwrap(); - copy.finish().await.unwrap(); - }) + } + copy.send(buffer.as_bytes()).await.unwrap(); + copy.finish().await.unwrap(); } - pub fn rollback_l2_to_l1_logs(&mut self, block_number: MiniblockNumber) { - async_std::task::block_on(async { - sqlx::query!( - "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1", - block_number.0 as i64 - ) - .execute(self.storage.conn()) + /// Removes all L2-to-L1 logs with a miniblock number strictly greater than the specified `block_number`. + pub async fn rollback_l2_to_l1_logs(&mut self, block_number: MiniblockNumber) { + sqlx::query!( + "DELETE FROM l2_to_l1_logs WHERE miniblock_number > $1", + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub(crate) async fn l2_to_l1_logs( + &mut self, + tx_hash: H256, + ) -> Result, SqlxError> { + sqlx::query_as!( + StorageL2ToL1Log, + "SELECT \ + miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, \ + Null::bytea as \"block_hash\", Null::bigint as \"l1_batch_number?\", \ + shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value \ + FROM l2_to_l1_logs \ + WHERE tx_hash = $1 \ + ORDER BY log_index_in_tx ASC", + tx_hash.as_bytes() + ) + .fetch_all(self.storage.conn()) + .await + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{tests::create_miniblock_header, ConnectionPool}; + use db_test_macro::db_test; + use zksync_types::{Address, L1BatchNumber}; + + fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { + assert!(topic_count <= 4); + VmEvent { + location: (L1BatchNumber(1), u32::from(index)), + address: Address::repeat_byte(index), + indexed_topics: (0..topic_count).map(H256::repeat_byte).collect(), + value: vec![index], + } + } + + #[db_test(dal_crate)] + async fn storing_events(pool: ConnectionPool) { + let mut conn = pool.access_storage().await; + conn.events_dal().rollback_events(MiniblockNumber(0)).await; + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(1)) + .await; + + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::default(), + }; + let first_events = vec![create_vm_event(0, 0), create_vm_event(1, 4)]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_events = vec![ + create_vm_event(2, 2), + create_vm_event(3, 3), + create_vm_event(4, 4), + ]; + let all_events = vec![ + (first_location, first_events.iter().collect()), + (second_location, second_events.iter().collect()), + ]; + conn.events_dal() + .save_events(MiniblockNumber(1), &all_events) + .await; + + let logs = conn + .events_web3_dal() + .get_all_logs(MiniblockNumber(0)) .await .unwrap(); - }) + assert_eq!(logs.len(), 5); + for (i, log) in logs.iter().enumerate() { + let (expected_tx_index, expected_topics) = if i < 
first_events.len() { + (0_u64, &first_events[i].indexed_topics) + } else { + (1_u64, &second_events[i - first_events.len()].indexed_topics) + }; + let i = i as u8; + + assert_eq!(log.block_number, Some(1_u64.into())); + assert_eq!(log.l1_batch_number, None); + assert_eq!(log.address, Address::repeat_byte(i)); + assert_eq!(log.transaction_index, Some(expected_tx_index.into())); + assert_eq!(log.log_index, Some(i.into())); + assert_eq!(log.data.0, [i]); + assert_eq!(log.topics, *expected_topics); + } } - pub fn get_first_miniblock_with_saved_l2_to_l1_logs(&mut self) -> Option { - async_std::task::block_on(async { - let row = sqlx::query!( - r#" - SELECT MIN(miniblock_number) as "min?" - FROM l2_to_l1_logs - "#, - ) - .fetch_one(self.storage.conn()) + fn create_l2_to_l1_log(tx_number_in_block: u16, index: u8) -> L2ToL1Log { + L2ToL1Log { + shard_id: 0, + is_service: false, + tx_number_in_block, + sender: Address::repeat_byte(index), + key: H256::from_low_u64_be(u64::from(index)), + value: H256::repeat_byte(index), + } + } + + #[db_test(dal_crate)] + async fn storing_l2_to_l1_logs(pool: ConnectionPool) { + let mut conn = pool.access_storage().await; + conn.events_dal() + .rollback_l2_to_l1_logs(MiniblockNumber(0)) + .await; + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(1)) + .await; + + let first_location = IncludedTxLocation { + tx_hash: H256([1; 32]), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::default(), + }; + let first_logs = vec![create_l2_to_l1_log(0, 0), create_l2_to_l1_log(0, 1)]; + let second_location = IncludedTxLocation { + tx_hash: H256([2; 32]), + tx_index_in_miniblock: 1, + tx_initiator_address: Address::default(), + }; + let second_logs = vec![ + create_l2_to_l1_log(1, 2), + create_l2_to_l1_log(1, 3), + create_l2_to_l1_log(1, 4), + ]; + let all_logs = vec![ + (first_location, first_logs.iter().collect()), + (second_location, second_logs.iter().collect()), + ]; + conn.events_dal() + .save_l2_to_l1_logs(MiniblockNumber(1), &all_logs) + .await; + + let logs = conn + .events_dal() + .l2_to_l1_logs(H256([1; 32])) + .await + .unwrap(); + assert_eq!(logs.len(), first_logs.len()); + for (i, log) in logs.iter().enumerate() { + assert_eq!(log.log_index_in_miniblock as usize, i); + assert_eq!(log.log_index_in_tx as usize, i); + } + for (log, expected_log) in logs.iter().zip(&first_logs) { + assert_eq!(log.key, expected_log.key.as_bytes()); + assert_eq!(log.value, expected_log.value.as_bytes()); + assert_eq!(log.sender, expected_log.sender.as_bytes()); + } + + let logs = conn + .events_dal() + .l2_to_l1_logs(H256([2; 32])) .await .unwrap(); - row.min.map(|min| MiniblockNumber(min as u32)) - }) + assert_eq!(logs.len(), second_logs.len()); + for (i, log) in logs.iter().enumerate() { + assert_eq!(log.log_index_in_miniblock as usize, i + first_logs.len()); + assert_eq!(log.log_index_in_tx as usize, i); + } + for (log, expected_log) in logs.iter().zip(&second_logs) { + assert_eq!(log.key, expected_log.key.as_bytes()); + assert_eq!(log.value, expected_log.value.as_bytes()); + assert_eq!(log.sender, expected_log.sender.as_bytes()); + } } } diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 4c854fc95350..83edea20a152 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -14,18 +14,18 @@ use crate::StorageProcessor; #[derive(Debug)] pub struct EventsWeb3Dal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + 
pub(crate) storage: &'a mut StorageProcessor<'c>,
 }
 
 impl EventsWeb3Dal<'_, '_> {
     /// Returns miniblock number of log for given filter and offset.
     /// Used to determine if there is more than `offset` logs that satisfies filter.
-    pub fn get_log_block_number(
+    pub async fn get_log_block_number(
         &mut self,
         filter: GetLogsFilter,
         offset: usize,
     ) -> Result<Option<MiniblockNumber>, SqlxError> {
-        async_std::task::block_on(async {
+        {
             let started_at = Instant::now();
             let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter);
@@ -60,13 +60,17 @@ impl EventsWeb3Dal<'_, '_> {
             metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_log_block_number");
 
             Ok(log.map(|row| MiniblockNumber(row.get::<i64, &str>("miniblock_number") as u32)))
-        })
+        }
     }
 
     /// Returns logs for given filter.
     #[allow(clippy::type_complexity)]
-    pub fn get_logs(&mut self, filter: GetLogsFilter, limit: usize) -> Result<Vec<Log>, SqlxError> {
-        async_std::task::block_on(async {
+    pub async fn get_logs(
+        &mut self,
+        filter: GetLogsFilter,
+        limit: usize,
+    ) -> Result<Vec<Log>, SqlxError> {
+        {
             let started_at = Instant::now();
             let (where_sql, arg_index) = self.build_get_logs_where_clause(&filter);
@@ -109,7 +113,7 @@ impl EventsWeb3Dal<'_, '_> {
             let logs = db_logs.into_iter().map(Into::into).collect();
             metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_logs");
             Ok(logs)
-        })
+        }
     }
 
     fn build_get_logs_where_clause(&self, filter: &GetLogsFilter) -> (String, u8) {
@@ -133,8 +137,11 @@ impl EventsWeb3Dal<'_, '_> {
         (where_sql, arg_index)
     }
 
-    pub fn get_all_logs(&mut self, from_block: MiniblockNumber) -> Result<Vec<Log>, SqlxError> {
-        async_std::task::block_on(async {
+    pub async fn get_all_logs(
+        &mut self,
+        from_block: MiniblockNumber,
+    ) -> Result<Vec<Log>, SqlxError> {
+        {
             let db_logs: Vec<StorageWeb3Log> = sqlx::query_as!(
                 StorageWeb3Log,
                 r#"
@@ -161,15 +168,15 @@ impl EventsWeb3Dal<'_, '_> {
             .await?;
             let logs = db_logs.into_iter().map(Into::into).collect();
             Ok(logs)
-        })
+        }
     }
 }
 
 #[cfg(test)]
 mod tests {
     use db_test_macro::db_test;
-    use vm::zk_evm::ethereum_types::{Address, H256};
     use zksync_types::api::BlockNumber;
+    use zksync_types::{Address, H256};
 
     use super::*;
     use crate::connection::ConnectionPool;
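The recurring transformation in this patch: DAL methods stop wrapping their bodies in `async_std::task::block_on` and become real `async fn`s, with the interim `{ ... }` blocks keeping the diffs small. A caller-side sketch of what the migration looks like, assuming code that already holds a `StorageProcessor` (types as used in the hunks above):

// Sketch (not part of the patch): how call sites change once the DAL is async.
async fn print_log_count(
    storage: &mut StorageProcessor<'_>,
    filter: GetLogsFilter,
) -> Result<(), SqlxError> {
    // Previously: storage.events_web3_dal().get_logs(filter, 100)?  (blocked the thread)
    let logs = storage.events_web3_dal().get_logs(filter, 100).await?;
    println!("fetched {} logs", logs.len());
    Ok(())
}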
"count!" @@ -31,62 +52,65 @@ impl ContractVerificationDal<'_, '_> { .fetch_one(self.storage.conn()) .await .map(|row| row.count as usize) - }) + } } - pub fn add_contract_verification_request( + pub async fn add_contract_verification_request( &mut self, query: VerificationIncomingRequest, ) -> Result { - async_std::task::block_on(async { + { sqlx::query!( " INSERT INTO contract_verification_requests ( contract_address, source_code, contract_name, - compiler_zksolc_version, - compiler_solc_version, + zk_compiler_version, + compiler_version, optimization_used, + optimizer_mode, constructor_arguments, is_system, status, created_at, updated_at ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', now(), now()) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'queued', now(), now()) RETURNING id ", query.contract_address.as_bytes(), serde_json::to_string(&query.source_code_data).unwrap(), query.contract_name, - query.compiler_zksolc_version, - query.compiler_solc_version, + query.compiler_versions.zk_compiler_version(), + query.compiler_versions.compiler_version(), query.optimization_used, + query.optimizer_mode, query.constructor_arguments.0, query.is_system, ) .fetch_one(self.storage.conn()) .await .map(|row| row.id as usize) - }) + } } /// Returns the next verification request for processing. /// Considering the situation where processing of some request /// can be interrupted (panic, pod restart, etc..), /// `processing_timeout` parameter is added to avoid stucking of requests. - pub fn get_next_queued_verification_request( + pub async fn get_next_queued_verification_request( &mut self, processing_timeout: Duration, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let processing_timeout = PgInterval { months: 0, days: 0, microseconds: processing_timeout.as_micros() as i64, }; - let result = sqlx::query!( + let result = sqlx::query_as!( + StorageVerificationRequest, "UPDATE contract_verification_requests SET status = 'in_progress', attempts = attempts + 1, updated_at = now(), processing_started_at = now() @@ -98,34 +122,24 @@ impl ContractVerificationDal<'_, '_> { FOR UPDATE SKIP LOCKED ) - RETURNING contract_verification_requests.*", + RETURNING id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used, + optimizer_mode, constructor_arguments, is_system + ", &processing_timeout ) .fetch_optional(self.storage.conn()) .await? - .map(|row| VerificationRequest { - id: row.id as usize, - req: VerificationIncomingRequest { - contract_address: Address::from_slice(&row.contract_address), - source_code_data: serde_json::from_str(&row.source_code).unwrap(), - contract_name: row.contract_name, - compiler_zksolc_version: row.compiler_zksolc_version, - compiler_solc_version: row.compiler_solc_version, - optimization_used: row.optimization_used, - constructor_arguments: row.constructor_arguments.into(), - is_system: row.is_system, - }, - }); + .map(Into::into); Ok(result) - }) + } } /// Updates the verification request status and inserts the verification info upon successful verification. 
     /// Updates the verification request status and inserts the verification info upon successful verification.
-    pub fn save_verification_info(
+    pub async fn save_verification_info(
         &mut self,
         verification_info: VerificationInfo,
     ) -> Result<(), SqlxError> {
-        async_std::task::block_on(async {
+        {
             let mut transaction = self.storage.start_transaction().await;
 
             sqlx::query!(
@@ -158,17 +172,17 @@ impl ContractVerificationDal<'_, '_> {
 
             transaction.commit().await;
             Ok(())
-        })
+        }
     }
 
-    pub fn save_verification_error(
+    pub async fn save_verification_error(
         &mut self,
         id: usize,
         error: String,
         compilation_errors: serde_json::Value,
         panic_message: Option<String>,
     ) -> Result<(), SqlxError> {
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "
                 UPDATE contract_verification_requests
@@ -183,14 +197,14 @@ impl ContractVerificationDal<'_, '_> {
             .execute(self.storage.conn())
             .await?;
             Ok(())
-        })
+        }
     }
 
-    pub fn get_verification_request_status(
+    pub async fn get_verification_request_status(
         &mut self,
         id: usize,
     ) -> Result<Option<VerificationRequestStatus>, SqlxError> {
-        async_std::task::block_on(async {
+        {
             let result = sqlx::query!(
                 "
                 SELECT status, error, compilation_errors FROM contract_verification_requests
@@ -220,15 +234,15 @@ impl ContractVerificationDal<'_, '_> {
                 }),
             });
             Ok(result)
-        })
+        }
     }
 
     /// Returns bytecode and calldata from the contract and the transaction that created it.
-    pub fn get_contract_info_for_verification(
+    pub async fn get_contract_info_for_verification(
         &mut self,
         address: Address,
     ) -> Result<Option<(Vec<u8>, DeployContractCalldata)>, SqlxError> {
-        async_std::task::block_on(async {
+        {
             let hashed_key = get_code_key(&address).hashed_key();
             let result = sqlx::query!(
                 r#"
@@ -266,12 +280,12 @@ impl ContractVerificationDal<'_, '_> {
                 (row.bytecode, calldata)
             });
             Ok(result)
-        })
+        }
     }
 
     /// Returns true if the contract has a stored contracts_verification_info.
-    pub fn is_contract_verified(&mut self, address: Address) -> bool {
-        async_std::task::block_on(async {
+    pub async fn is_contract_verified(&mut self, address: Address) -> bool {
+        {
             let count = sqlx::query!(
                 r#"
                     SELECT COUNT(*) as "count!"
@@ -285,13 +299,18 @@ impl ContractVerificationDal<'_, '_> {
             .unwrap()
             .count;
             count > 0
-        })
+        }
     }
 
-    pub fn get_zksolc_versions(&mut self) -> Result<Vec<String>, SqlxError> {
-        async_std::task::block_on(async {
+    async fn get_compiler_versions(
+        &mut self,
+        compiler: Compiler,
+    ) -> Result<Vec<String>, SqlxError> {
+        {
+            let compiler = format!("{compiler}");
             let versions: Vec<_> = sqlx::query!(
-                "SELECT version FROM contract_verification_zksolc_versions ORDER by version"
+                "SELECT version FROM compiler_versions WHERE compiler = $1 ORDER by version",
+                &compiler
             )
             .fetch_all(self.storage.conn())
             .await?
@@ -299,98 +318,93 @@ impl ContractVerificationDal<'_, '_> {
             .map(|row| row.version)
             .collect();
             Ok(versions)
-        })
+        }
     }
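The per-compiler version tables collapse into one `compiler_versions` table keyed by the `Compiler` enum's `Display` string, and the setters below replace all rows for one compiler in a single statement by unnesting a bound text array server-side. A minimal sketch of that UNNEST idiom (assuming an illustrative `demo_versions(version, compiler)` table, mirroring `set_compiler_versions` below):

use sqlx::PgConnection;

// Replaces all versions for one compiler in two round trips, any list length.
async fn replace_versions(
    conn: &mut PgConnection,
    compiler: &str,
    versions: &[String],
) -> sqlx::Result<()> {
    sqlx::query("DELETE FROM demo_versions WHERE compiler = $1")
        .bind(compiler)
        .execute(&mut *conn)
        .await?;
    // UNNEST turns the bound text[] into rows, so one INSERT covers the whole list.
    sqlx::query(
        "INSERT INTO demo_versions (version, compiler)
         SELECT u.version, $2 FROM UNNEST($1::text[]) AS u(version)",
    )
    .bind(versions)
    .bind(compiler)
    .execute(conn)
    .await?;
    Ok(())
}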
 
-    pub fn get_solc_versions(&mut self) -> Result<Vec<String>, SqlxError> {
-        async_std::task::block_on(async {
-            let versions: Vec<_> = sqlx::query!(
-                "SELECT version FROM contract_verification_solc_versions ORDER by version"
-            )
-            .fetch_all(self.storage.conn())
-            .await?
-            .into_iter()
-            .map(|row| row.version)
-            .collect();
-            Ok(versions)
-        })
+    pub async fn get_zksolc_versions(&mut self) -> Result<Vec<String>, SqlxError> {
+        self.get_compiler_versions(Compiler::ZkSolc).await
+    }
+
+    pub async fn get_solc_versions(&mut self) -> Result<Vec<String>, SqlxError> {
+        self.get_compiler_versions(Compiler::Solc).await
+    }
+
+    pub async fn get_zkvyper_versions(&mut self) -> Result<Vec<String>, SqlxError> {
+        self.get_compiler_versions(Compiler::ZkVyper).await
+    }
+
+    pub async fn get_vyper_versions(&mut self) -> Result<Vec<String>, SqlxError> {
+        self.get_compiler_versions(Compiler::Vyper).await
     }
 
-    pub fn set_zksolc_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
-        async_std::task::block_on(async {
+    async fn set_compiler_versions(
+        &mut self,
+        compiler: Compiler,
+        versions: Vec<String>,
+    ) -> Result<(), SqlxError> {
+        {
             let mut transaction = self.storage.start_transaction().await;
+            let compiler = format!("{compiler}");
 
-            sqlx::query!("DELETE FROM contract_verification_zksolc_versions")
-                .execute(transaction.conn())
-                .await?;
+            sqlx::query!(
+                "DELETE FROM compiler_versions WHERE compiler = $1",
+                &compiler
+            )
+            .execute(transaction.conn())
+            .await?;
 
             sqlx::query!(
                 "
-                INSERT INTO contract_verification_zksolc_versions (version, created_at, updated_at)
-                SELECT u.version, now(), now()
-                    FROM UNNEST($1::text[])
-                AS u(version)
-                ",
-                &versions
+                INSERT INTO compiler_versions (version, compiler, created_at, updated_at)
+                SELECT u.version, $2, now(), now()
+                    FROM UNNEST($1::text[])
+                AS u(version)",
+                &versions,
+                &compiler,
             )
-            .execute(transaction.conn())
-            .await?;
+            .execute(transaction.conn())
+            .await?;
 
             transaction.commit().await;
             Ok(())
-        })
+        }
     }
 
-    pub fn set_solc_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
-        async_std::task::block_on(async {
-            let mut transaction = self.storage.start_transaction().await;
+    pub async fn set_zksolc_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
+        self.set_compiler_versions(Compiler::ZkSolc, versions).await
+    }
 
-            sqlx::query!("DELETE FROM contract_verification_solc_versions")
-                .execute(transaction.conn())
-                .await?;
+    pub async fn set_solc_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
+        self.set_compiler_versions(Compiler::Solc, versions).await
+    }
 
-            sqlx::query!(
-                "
-                INSERT INTO contract_verification_solc_versions (version, created_at, updated_at)
-                SELECT u.version, now(), now()
-                    FROM UNNEST($1::text[])
-                AS u(version)
-                ",
-                &versions
-            )
-            .execute(transaction.conn())
-            .await?;
+    pub async fn set_zkvyper_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
+        self.set_compiler_versions(Compiler::ZkVyper, versions)
+            .await
+    }
 
-            transaction.commit().await;
-            Ok(())
-        })
+    pub async fn set_vyper_versions(&mut self, versions: Vec<String>) -> Result<(), SqlxError> {
+        self.set_compiler_versions(Compiler::Vyper, versions).await
     }
 
-    pub fn get_all_successful_requests(&mut self) -> Result<Vec<VerificationRequest>, SqlxError> {
-        async_std::task::block_on(async {
-            let result = sqlx::query!(
-                "SELECT * FROM contract_verification_requests
+    pub async fn get_all_successful_requests(
+        &mut self,
+    ) -> Result<Vec<VerificationRequest>, SqlxError> {
+        {
+            let result = sqlx::query_as!(
+                StorageVerificationRequest,
+                "SELECT id, contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used,
+                    optimizer_mode, constructor_arguments, is_system
+                FROM contract_verification_requests
                 WHERE status = 'successful'
                 ORDER BY id",
             )
            .fetch_all(self.storage.conn())
            .await?
.into_iter() - .map(|row| VerificationRequest { - id: row.id as usize, - req: VerificationIncomingRequest { - contract_address: Address::from_slice(&row.contract_address), - source_code_data: serde_json::from_str(&row.source_code).unwrap(), - contract_name: row.contract_name, - compiler_zksolc_version: row.compiler_zksolc_version, - compiler_solc_version: row.compiler_solc_version, - optimization_used: row.optimization_used, - constructor_arguments: row.constructor_arguments.into(), - is_system: row.is_system, - }, - }) + .map(Into::into) .collect(); Ok(result) - }) + } } } diff --git a/core/lib/dal/src/explorer/explorer_accounts_dal.rs b/core/lib/dal/src/explorer/explorer_accounts_dal.rs index 78c398450e14..b3b4e0c6c3f0 100644 --- a/core/lib/dal/src/explorer/explorer_accounts_dal.rs +++ b/core/lib/dal/src/explorer/explorer_accounts_dal.rs @@ -10,25 +10,25 @@ use zksync_types::{ U256, }; -use crate::SqlxError; -use crate::StorageProcessor; +use crate::{SqlxError, StorageProcessor}; #[derive(Debug)] pub struct ExplorerAccountsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(super) storage: &'a mut StorageProcessor<'c>, } impl ExplorerAccountsDal<'_, '_> { - pub fn get_balances_for_address( + pub async fn get_balances_for_address( &mut self, address: Address, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let token_l2_addresses = self .storage .explorer() .misc_dal() - .get_well_known_token_l2_addresses()?; + .get_well_known_token_l2_addresses() + .await?; let hashed_keys: Vec> = token_l2_addresses .into_iter() .map(|mut l2_token_address| { @@ -86,33 +86,45 @@ impl ExplorerAccountsDal<'_, '_> { }) .collect(); Ok(result) - }) + } } /// Returns sealed and verified nonces for address. - pub fn get_account_nonces(&mut self, address: Address) -> Result<(Nonce, Nonce), SqlxError> { + pub async fn get_account_nonces( + &mut self, + address: Address, + ) -> Result<(Nonce, Nonce), SqlxError> { + let latest_block_number = self + .storage + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest)) + .await? + .unwrap(); let sealed_nonce = self .storage .storage_web3_dal() - .get_address_historical_nonce(address, api::BlockId::Number(api::BlockNumber::Latest))? - .unwrap() + .get_address_historical_nonce(address, latest_block_number) + .await? .as_u32(); + + let finalized_block_number = self + .storage + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Finalized)) + .await? + .unwrap(); // Safe: we always have at least the genesis miniblock finalized let verified_nonce = self .storage .storage_web3_dal() - .get_address_historical_nonce( - address, - api::BlockId::Number(api::BlockNumber::Finalized), - )? - .unwrap_or_default() + .get_address_historical_nonce(address, finalized_block_number) + .await? 
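The reworked `get_account_nonces` resolves the symbolic block ids (`Latest`, `Finalized`) to concrete miniblock numbers first, then feeds plain numbers into the historical-nonce lookup, keeping block resolution out of the nonce query itself. A condensed sketch of that two-step flow, using the method names from the hunk above:

// Sketch of the resolve-then-query pattern; `storage` is a `StorageProcessor`.
async fn sealed_nonce(
    storage: &mut StorageProcessor<'_>,
    address: Address,
) -> Result<u32, SqlxError> {
    let latest = storage
        .blocks_web3_dal()
        .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest))
        .await?
        .unwrap(); // safe per the hunk above: the latest miniblock always resolves
    let nonce = storage
        .storage_web3_dal()
        .get_address_historical_nonce(address, latest)
        .await?;
    Ok(nonce.as_u32())
}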
.as_u32(); - Ok((Nonce(sealed_nonce), Nonce(verified_nonce))) } - pub fn get_account_type(&mut self, address: Address) -> Result { + pub async fn get_account_type(&mut self, address: Address) -> Result { let hashed_key = get_code_key(&address).hashed_key(); - async_std::task::block_on(async { + { let contract_exists = sqlx::query!( r#" SELECT true as "exists" @@ -134,6 +146,6 @@ impl ExplorerAccountsDal<'_, '_> { None => AccountType::EOA, }; Ok(result) - }) + } } } diff --git a/core/lib/dal/src/explorer/explorer_blocks_dal.rs b/core/lib/dal/src/explorer/explorer_blocks_dal.rs index 22ad9de51873..72b96785f7a4 100644 --- a/core/lib/dal/src/explorer/explorer_blocks_dal.rs +++ b/core/lib/dal/src/explorer/explorer_blocks_dal.rs @@ -15,16 +15,16 @@ use crate::StorageProcessor; #[derive(Debug)] pub struct ExplorerBlocksDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(super) storage: &'a mut StorageProcessor<'c>, } impl ExplorerBlocksDal<'_, '_> { - pub fn get_blocks_page( + pub async fn get_blocks_page( &mut self, query: BlocksQuery, last_verified: MiniblockNumber, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let (cmp_sign, order_str) = match query.pagination.direction { PaginationDirection::Older => ("<", "DESC"), PaginationDirection::Newer => (">", "ASC"), @@ -57,15 +57,15 @@ impl ExplorerBlocksDal<'_, '_> { .map(|row| block_page_item_from_storage(row, last_verified)) .collect(); Ok(result) - }) + } } - pub fn get_block_details( + pub async fn get_block_details( &mut self, block_number: MiniblockNumber, current_operator_address: Address, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let started_at = Instant::now(); let storage_block_details: Option = sqlx::query_as!( StorageBlockDetails, @@ -102,15 +102,15 @@ impl ExplorerBlocksDal<'_, '_> { Ok(storage_block_details.map(|storage_block_details| { storage_block_details.into_block_details(current_operator_address) })) - }) + } } - pub fn get_l1_batches_page( + pub async fn get_l1_batches_page( &mut self, query: L1BatchesQuery, last_verified: L1BatchNumber, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let (cmp_sign, order_str) = match query.pagination.direction { PaginationDirection::Older => ("<", "DESC"), PaginationDirection::Newer => (">", "ASC"), @@ -143,14 +143,14 @@ impl ExplorerBlocksDal<'_, '_> { .map(|row| l1_batch_page_item_from_storage(row, last_verified)) .collect(); Ok(result) - }) + } } - pub fn get_l1_batch_details( + pub async fn get_l1_batch_details( &mut self, l1_batch_number: L1BatchNumber, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let started_at = Instant::now(); let l1_batch_details: Option = sqlx::query_as!( StorageL1BatchDetails, @@ -182,6 +182,6 @@ impl ExplorerBlocksDal<'_, '_> { .await?; metrics::histogram!("dal.request", started_at.elapsed(), "method" => "explorer_get_l1_batch_details"); Ok(l1_batch_details.map(L1BatchDetails::from)) - }) + } } } diff --git a/core/lib/dal/src/explorer/explorer_events_dal.rs b/core/lib/dal/src/explorer/explorer_events_dal.rs index 37d6b279f732..02db9bb90fc3 100644 --- a/core/lib/dal/src/explorer/explorer_events_dal.rs +++ b/core/lib/dal/src/explorer/explorer_events_dal.rs @@ -8,16 +8,16 @@ use crate::{SqlxError, StorageProcessor}; #[derive(Debug)] pub struct ExplorerEventsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(super) storage: &'a mut StorageProcessor<'c>, } impl ExplorerEventsDal<'_, '_> { - pub fn get_events_page( + pub async fn get_events_page( &mut self, query: 
EventsQuery, max_total: usize, ) -> Result { - async_std::task::block_on(async { + { let (cmp_sign, order_str) = match query.pagination.direction { PaginationDirection::Older => ("<", "DESC"), PaginationDirection::Newer => (">", "ASC"), @@ -112,6 +112,6 @@ impl ExplorerEventsDal<'_, '_> { list: logs, total: total as usize, }) - }) + } } } diff --git a/core/lib/dal/src/explorer/explorer_misc_dal.rs b/core/lib/dal/src/explorer/explorer_misc_dal.rs index 50003dc4c15e..2946fe4d7b17 100644 --- a/core/lib/dal/src/explorer/explorer_misc_dal.rs +++ b/core/lib/dal/src/explorer/explorer_misc_dal.rs @@ -8,15 +8,15 @@ use zksync_types::{ #[derive(Debug)] pub struct ExplorerMiscDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(super) storage: &'a mut StorageProcessor<'c>, } impl ExplorerMiscDal<'_, '_> { - pub fn get_token_details( + pub async fn get_token_details( &mut self, address: Address, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let row = sqlx::query!( r#" SELECT l1_address, l2_address, symbol, name, decimals, usd_price @@ -37,11 +37,11 @@ impl ExplorerMiscDal<'_, '_> { usd_price: row.usd_price, }); Ok(result) - }) + } } - pub fn get_well_known_token_l2_addresses(&mut self) -> Result, SqlxError> { - async_std::task::block_on(async { + pub async fn get_well_known_token_l2_addresses(&mut self) -> Result, SqlxError> { + { let addresses = sqlx::query!("SELECT l2_address FROM tokens WHERE well_known = true") .fetch_all(self.storage.conn()) .await? @@ -49,14 +49,14 @@ impl ExplorerMiscDal<'_, '_> { .map(|record| Address::from_slice(&record.l2_address)) .collect(); Ok(addresses) - }) + } } - pub fn get_contract_info( + pub async fn get_contract_info( &mut self, address: Address, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let hashed_key = get_code_key(&address).hashed_key(); let info = sqlx::query_as!( StorageContractInfo, @@ -87,11 +87,14 @@ impl ExplorerMiscDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; Ok(info.map(|info| info.into())) - }) + } } - pub fn get_contract_stats(&mut self, address: Address) -> Result { - async_std::task::block_on(async { + pub async fn get_contract_stats( + &mut self, + address: Address, + ) -> Result { + { let row = sqlx::query!( r#" SELECT COUNT(*) as "total_transactions!" @@ -108,6 +111,6 @@ impl ExplorerMiscDal<'_, '_> { }) .unwrap_or_default(); Ok(result) - }) + } } } diff --git a/core/lib/dal/src/explorer/explorer_transactions_dal.rs b/core/lib/dal/src/explorer/explorer_transactions_dal.rs index b6ead0b27847..1761af88266f 100644 --- a/core/lib/dal/src/explorer/explorer_transactions_dal.rs +++ b/core/lib/dal/src/explorer/explorer_transactions_dal.rs @@ -26,16 +26,16 @@ use crate::StorageProcessor; #[derive(Debug)] pub struct ExplorerTransactionsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(super) storage: &'a mut StorageProcessor<'c>, } impl ExplorerTransactionsDal<'_, '_> { - pub fn get_transactions_count_between( + pub async fn get_transactions_count_between( &mut self, from_block_number: MiniblockNumber, to_block_number: MiniblockNumber, ) -> Result { - async_std::task::block_on(async { + { let tx_count = sqlx::query!( r#"SELECT COUNT(*) as "count!" FROM transactions WHERE miniblock_number BETWEEN $1 AND $2"#, @@ -46,15 +46,15 @@ impl ExplorerTransactionsDal<'_, '_> { .await? 
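The explorer page queries in the hunks above all share one pagination idiom: the direction picks both the comparison operator and the sort order, and only those two fragments are spliced into the SQL. A reduced sketch of that mapping (clause names are illustrative; the real queries still pass values through bind parameters):

// Direction-to-SQL mapping used by the explorer pagination queries.
fn page_clause(direction: PaginationDirection, from_number: i64) -> (String, String) {
    let (cmp_sign, order_str) = match direction {
        PaginationDirection::Older => ("<", "DESC"),
        PaginationDirection::Newer => (">", "ASC"),
    };
    // Only the operator and sort order vary between the two directions.
    (
        format!("number {cmp_sign} {from_number}"),
        format!("ORDER BY number {order_str}"),
    )
}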
.count as usize; Ok(tx_count) - }) + } } - pub fn get_transaction_details( + pub async fn get_transaction_details( &mut self, hash: H256, l2_erc20_bridge_addr: Address, ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let tx_details: Option = sqlx::query_as!( StorageTransactionDetails, r#" @@ -77,7 +77,8 @@ impl ExplorerTransactionsDal<'_, '_> { .await?; let tx = if let Some(tx_details) = tx_details { let list = self - .storage_tx_list_to_tx_details_list(vec![tx_details], l2_erc20_bridge_addr)?; + .storage_tx_list_to_tx_details_list(vec![tx_details], l2_erc20_bridge_addr) + .await?; let tx = list[0].clone(); let logs: Vec = sqlx::query_as!( StorageWeb3Log, @@ -108,11 +109,11 @@ impl ExplorerTransactionsDal<'_, '_> { None }; Ok(tx) - }) + } } #[allow(clippy::too_many_arguments)] - pub fn get_transactions_page( + pub async fn get_transactions_page( &mut self, from_tx_location: Option, block_number: Option, @@ -122,7 +123,7 @@ impl ExplorerTransactionsDal<'_, '_> { max_total: usize, l2_erc20_bridge_addr: Address, ) -> Result { - async_std::task::block_on(async { + { let (cmp_sign, order_str) = match pagination.direction { PaginationDirection::Older => ("<", "DESC"), PaginationDirection::Newer => (">", "ASC"), @@ -189,8 +190,9 @@ impl ExplorerTransactionsDal<'_, '_> { let storage_txs: Vec = sqlx::query_as(&sql_query_list_str) .fetch_all(self.storage.conn()) .await?; - let list = - self.storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr)?; + let list = self + .storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr) + .await?; let sql_query_total_str = format!( r#" @@ -208,11 +210,11 @@ impl ExplorerTransactionsDal<'_, '_> { .get::("count") as usize; Ok(TransactionsResponse { list, total }) - }) + } } #[allow(clippy::too_many_arguments)] - pub fn get_account_transactions_page( + pub async fn get_account_transactions_page( &mut self, account_address: Address, from_tx_location: Option, @@ -221,19 +223,21 @@ impl ExplorerTransactionsDal<'_, '_> { max_total: usize, l2_erc20_bridge_addr: Address, ) -> Result { - async_std::task::block_on(async { + { let order_str = match pagination.direction { PaginationDirection::Older => "DESC", PaginationDirection::Newer => "ASC", }; - let (hashes, total) = self.get_account_transactions_hashes_page( - account_address, - from_tx_location, - block_number, - pagination, - max_total, - )?; + let (hashes, total) = self + .get_account_transactions_hashes_page( + account_address, + from_tx_location, + block_number, + pagination, + max_total, + ) + .await?; let sql_query_str = format!( r#" SELECT transactions.*, miniblocks.hash as "block_hash", @@ -256,14 +260,15 @@ impl ExplorerTransactionsDal<'_, '_> { let sql_query = sqlx::query_as(&sql_query_str).bind(hashes); let storage_txs: Vec = sql_query.fetch_all(self.storage.conn()).await?; - let list = - self.storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr)?; + let list = self + .storage_tx_list_to_tx_details_list(storage_txs, l2_erc20_bridge_addr) + .await?; Ok(TransactionsResponse { list, total }) - }) + } } - fn get_account_transactions_hashes_page( + async fn get_account_transactions_hashes_page( &mut self, account_address: Address, from_tx_location: Option, @@ -271,7 +276,7 @@ impl ExplorerTransactionsDal<'_, '_> { pagination: PaginationQuery, max_total: usize, ) -> Result<(Vec>, usize), SqlxError> { - async_std::task::block_on(async { + { let started_at = Instant::now(); let (cmp_sign, order_str) = match pagination.direction { 
PaginationDirection::Older => ("<", "DESC"), @@ -416,14 +421,14 @@ impl ExplorerTransactionsDal<'_, '_> { metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_account_transactions_hashes_page"); Ok((result, total)) - }) + } } - fn get_erc20_transfers( + async fn get_erc20_transfers( &mut self, hashes: Vec>, ) -> Result>, SqlxError> { - async_std::task::block_on(async { + { let transfers = sqlx::query!( r#" SELECT tx_hash, topic2 as "topic2!", topic3 as "topic3!", value as "value!", @@ -467,15 +472,15 @@ impl ExplorerTransactionsDal<'_, '_> { }).collect::>())) .collect(); Ok(transfers) - }) + } } - fn get_withdrawals( + async fn get_withdrawals( &mut self, hashes: Vec>, l2_erc20_bridge_addr: Address, ) -> Result>, SqlxError> { - async_std::task::block_on(async { + { static ERC20_WITHDRAW_EVENT_SIGNATURE: Lazy = Lazy::new(|| { zksync_contracts::l2_bridge_contract() .event("WithdrawalInitiated") @@ -581,16 +586,16 @@ impl ExplorerTransactionsDal<'_, '_> { } Ok(withdrawals) - }) + } } /// Returns hashmap with transactions that are deposits. - fn get_deposits( + async fn get_deposits( &mut self, hashes: Vec>, l2_erc20_bridge_addr: Address, ) -> Result>, SqlxError> { - async_std::task::block_on(async { + { static ERC20_DEPOSIT_EVENT_SIGNATURE: Lazy = Lazy::new(|| { zksync_contracts::l2_bridge_contract() .event("FinalizeDeposit") @@ -693,15 +698,15 @@ impl ExplorerTransactionsDal<'_, '_> { } Ok(deposits) - }) + } } /// Returns hashmap with transactions that are ERC20 transfers. - fn filter_erc20_transfers( + async fn filter_erc20_transfers( &mut self, txs: &[StorageTransactionDetails], ) -> Result, SqlxError> { - async_std::task::block_on(async { + { let hashes: Vec> = txs.iter().map(|tx| tx.hash.clone()).collect(); // For transaction to be ERC20 transfer 2 conditions should be met // 1) It is an execute transaction and contract address is an ERC20 token. @@ -764,7 +769,8 @@ impl ExplorerTransactionsDal<'_, '_> { .storage .explorer() .misc_dal() - .get_token_details(Address::zero())? + .get_token_details(Address::zero()) + .await? 
.expect("Info about ETH should be present in DB"); let eth_transfers_iter = txs.iter().filter_map(|tx| { let hash = H256::from_slice(&tx.hash); @@ -791,19 +797,21 @@ impl ExplorerTransactionsDal<'_, '_> { let result = erc20_transfers_iter.chain(eth_transfers_iter).collect(); Ok(result) - }) + } } - fn storage_tx_list_to_tx_details_list( + async fn storage_tx_list_to_tx_details_list( &mut self, txs: Vec, l2_erc20_bridge_addr: Address, ) -> Result, SqlxError> { let hashes: Vec> = txs.iter().map(|tx| tx.hash.clone()).collect(); - let erc20_transfers_map = self.get_erc20_transfers(hashes.clone())?; - let withdrawals_map = self.get_withdrawals(hashes.clone(), l2_erc20_bridge_addr)?; - let erc20_transfers_filtered = self.filter_erc20_transfers(&txs)?; - let deposits_map = self.get_deposits(hashes, l2_erc20_bridge_addr)?; + let erc20_transfers_map = self.get_erc20_transfers(hashes.clone()).await?; + let withdrawals_map = self + .get_withdrawals(hashes.clone(), l2_erc20_bridge_addr) + .await?; + let erc20_transfers_filtered = self.filter_erc20_transfers(&txs).await?; + let deposits_map = self.get_deposits(hashes, l2_erc20_bridge_addr).await?; let txs = txs .into_iter() .map(|tx_details| { diff --git a/core/lib/dal/src/explorer/mod.rs b/core/lib/dal/src/explorer/mod.rs index f1951a002461..483c904597a1 100644 --- a/core/lib/dal/src/explorer/mod.rs +++ b/core/lib/dal/src/explorer/mod.rs @@ -15,11 +15,11 @@ pub mod explorer_transactions_dal; pub mod storage_contract_info; #[derive(Debug)] -pub struct ExplorerIntermediator<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, +pub struct ExplorerIntermediary<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, } -impl<'a, 'c> ExplorerIntermediator<'a, 'c> { +impl<'a, 'c> ExplorerIntermediary<'a, 'c> { pub fn contract_verification_dal(self) -> ContractVerificationDal<'a, 'c> { ContractVerificationDal { storage: self.storage, diff --git a/core/lib/dal/src/fee_monitor_dal.rs b/core/lib/dal/src/fee_monitor_dal.rs deleted file mode 100644 index e15ec62aa5aa..000000000000 --- a/core/lib/dal/src/fee_monitor_dal.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::StorageProcessor; -use crate::{events_web3_dal::EventsWeb3Dal, models::storage_fee_monitor::StorageBlockGasData}; -use zksync_config::constants::ERC20_TRANSFER_TOPIC; -use zksync_types::{ - api::{self, GetLogsFilter}, - Address, L1BatchNumber, L2_ETH_TOKEN_ADDRESS, U256, -}; -use zksync_utils::address_to_h256; - -// Dev note: these structure is not fundamental and exists for auxiliary -// monitoring purposes, it's use cases are limited and will normally appear -// together with calls to `FeeMonitorDal` methods, thus it doesn't really -// makes sense to extract it to the `types` crate. 
- -#[derive(Debug)] -pub struct GasConsumptionData { - pub consumed_gas: u64, - pub base_gas_price: u64, - pub priority_gas_price: u64, -} - -impl GasConsumptionData { - pub fn wei_spent(&self) -> u128 { - (self.base_gas_price + self.priority_gas_price) as u128 * self.consumed_gas as u128 - } -} - -#[derive(Debug)] -pub struct BlockGasConsumptionData { - pub block_number: L1BatchNumber, - pub commit: GasConsumptionData, - pub prove: GasConsumptionData, - pub execute: GasConsumptionData, -} - -impl BlockGasConsumptionData { - pub fn wei_spent(&self) -> u128 { - self.commit.wei_spent() + self.prove.wei_spent() + self.execute.wei_spent() - } -} - -#[derive(Debug)] -pub struct FeeMonitorDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, -} - -impl FeeMonitorDal<'_, '_> { - /// Returns data related to the gas consumption and gas costs for certain block. - /// In case of any unexpected situation (i.e. some data is not present in the database) - /// will return an error. - pub fn get_block_gas_consumption( - &mut self, - block_number: L1BatchNumber, - ) -> Result { - async_std::task::block_on(async { - let res: StorageBlockGasData = sqlx::query_as!( - StorageBlockGasData, - r#" - SELECT - l1_batches.number, - commit_tx_data.gas_used as "commit_gas?", - commit_tx.base_fee_per_gas as "commit_base_gas_price?", - commit_tx.priority_fee_per_gas as "commit_priority_gas_price?", - prove_tx_data.gas_used as "prove_gas?", - prove_tx.base_fee_per_gas as "prove_base_gas_price?", - prove_tx.priority_fee_per_gas as "prove_priority_gas_price?", - execute_tx_data.gas_used as "execute_gas?", - execute_tx.base_fee_per_gas as "execute_base_gas_price?", - execute_tx.priority_fee_per_gas as "execute_priority_gas_price?" - FROM l1_batches - LEFT JOIN eth_txs_history as commit_tx - ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs as commit_tx_data - ON (l1_batches.eth_commit_tx_id = commit_tx_data.id) - LEFT JOIN eth_txs_history as prove_tx - ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs as prove_tx_data - ON (l1_batches.eth_prove_tx_id = prove_tx_data.id) - LEFT JOIN eth_txs_history as execute_tx - ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL) - LEFT JOIN eth_txs as execute_tx_data - ON (l1_batches.eth_execute_tx_id = execute_tx_data.id) - WHERE l1_batches.number = $1 - "#, - block_number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await? - .ok_or_else(|| anyhow::format_err!("No block details for requested block {block_number}"))?; - - // Closure extracting `u64` out of `Option`. - // Normally we expect data to be present, but if for any reason it isn't we'll just return an error: - // it's tracking module, so no big deal. - let extract = |opt: Option| { - opt.map(|val| val as u64).ok_or_else(|| { - anyhow::format_err!("Some field was `None` for block {block_number}. 
Data from database: {res:?}") - }) - }; - - Ok(BlockGasConsumptionData { - block_number, - commit: GasConsumptionData { - consumed_gas: extract(res.commit_gas)?, - base_gas_price: extract(res.commit_base_gas_price)?, - priority_gas_price: extract(res.commit_priority_gas_price)?, - }, - prove: GasConsumptionData { - consumed_gas: extract(res.prove_gas)?, - base_gas_price: extract(res.prove_base_gas_price)?, - priority_gas_price: extract(res.prove_priority_gas_price)?, - }, - execute: GasConsumptionData { - consumed_gas: extract(res.execute_gas)?, - base_gas_price: extract(res.execute_base_gas_price)?, - priority_gas_price: extract(res.execute_priority_gas_price)?, - }, - }) - }) - } - - /// Fetches ETH ERC-20 transfers to a certain account for a certain block. - /// Returns the vector of transfer amounts. - pub fn fetch_erc20_transfers( - &mut self, - block_number: L1BatchNumber, - account: Address, - ) -> Result, anyhow::Error> { - // We expect one log per transaction, thus limitiing is not really important. - const MAX_LOGS_PER_BLOCK: usize = 100_000; - - // Event signature: `Transfer(address from, address to, uint256 value)`. - // We're filtering by the 1st (signature hash) and 3rd (receiver). - let topics = vec![ - (1, vec![ERC20_TRANSFER_TOPIC]), - (3, vec![address_to_h256(&account)]), - ]; - let miniblocks_range = match self - .storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(block_number) - { - Some(range) => range, - None => return Ok(Vec::new()), - }; - - let logs = { - let mut events_web3_dal = EventsWeb3Dal { - storage: self.storage, - }; - events_web3_dal.get_logs( - GetLogsFilter { - from_block: miniblocks_range.0, - to_block: Some(api::BlockNumber::Number(miniblocks_range.1 .0.into())), - addresses: vec![L2_ETH_TOKEN_ADDRESS], - topics, - }, - MAX_LOGS_PER_BLOCK, - )? - }; - - // Now collect the transfer amounts from retrieved logs. 
- let balances: Vec<_> = logs - .into_iter() - .map(|log| U256::from_big_endian(&log.data.0)) - .collect(); - - Ok(balances) - } -} diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs new file mode 100644 index 000000000000..c30edc51182a --- /dev/null +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -0,0 +1,298 @@ +use std::collections::HashMap; +use std::convert::TryFrom; +use std::time::{Duration, Instant}; +use zksync_config::configs::fri_prover_group::CircuitIdRoundTuple; + +use zksync_types::proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs}; +use zksync_types::L1BatchNumber; + +use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct FriProverDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +impl FriProverDal<'_, '_> { + pub async fn insert_prover_jobs( + &mut self, + l1_batch_number: L1BatchNumber, + circuit_ids_and_urls: Vec<(u8, String)>, + aggregation_round: AggregationRound, + depth: u16, + ) { + let started_at = Instant::now(); + for (sequence_number, (circuit_id, circuit_blob_url)) in + circuit_ids_and_urls.iter().enumerate() + { + self.insert_prover_job( + l1_batch_number, + *circuit_id, + depth, + sequence_number, + aggregation_round, + circuit_blob_url, + false, + ) + .await; + } + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_fri_prover_jobs"); + } + + pub async fn get_next_job(&mut self) -> Option { + let result: Option = sqlx::query!( + " + UPDATE prover_jobs_fri + SET status = 'in_progress', attempts = attempts + 1, + updated_at = now(), processing_started_at = now() + WHERE id = ( + SELECT id + FROM prover_jobs_fri + WHERE status = 'queued' + ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC + LIMIT 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + ", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }); + result + } + + pub async fn get_next_job_for_circuit_id_round( + &mut self, + circuits_to_pick: &[CircuitIdRoundTuple], + ) -> Option { + let circuit_ids: Vec<_> = circuits_to_pick + .iter() + .map(|tuple| tuple.circuit_id as i16) + .collect(); + let aggregation_rounds: Vec<_> = circuits_to_pick + .iter() + .map(|tuple| tuple.aggregation_round as i16) + .collect(); + let result: Option = sqlx::query!( + " + UPDATE prover_jobs_fri + SET status = 'in_progress', attempts = attempts + 1, + updated_at = now(), processing_started_at = now() + WHERE id = ( + SELECT id + FROM prover_jobs_fri + WHERE status = 'queued' + AND (circuit_id, aggregation_round) IN ( + SELECT * FROM UNNEST($1::smallint[], $2::smallint[]) + ) + ORDER BY aggregation_round DESC, l1_batch_number ASC, id ASC + LIMIT 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth, 
+ prover_jobs_fri.is_node_final_proof + ", + &circuit_ids[..], + &aggregation_rounds[..], + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }); + result + } + + pub async fn save_proof_error(&mut self, id: u32, error: String) { + { + sqlx::query!( + " + UPDATE prover_jobs_fri + SET status = 'failed', error = $1, updated_at = now() + WHERE id = $2 + ", + error, + id as i64, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + } + + pub async fn save_proof( + &mut self, + id: u32, + time_taken: Duration, + blob_url: &str, + ) -> FriProverJobMetadata { + let started_at = Instant::now(); + let result = sqlx::query!( + " + UPDATE prover_jobs_fri + SET status = 'successful', updated_at = now(), time_taken = $1, proof_blob_url=$2 + WHERE id = $3 + RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + ", + duration_to_naive_time(time_taken), + blob_url, + id as i64, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(row.aggregation_round as i32).unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) + .unwrap(); + + metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_fri_proof"); + result + } + + pub async fn requeue_stuck_jobs( + &mut self, + processing_timeout: Duration, + max_attempts: u32, + ) -> Vec { + let processing_timeout = pg_interval_from_duration(processing_timeout); + { + sqlx::query!( + " + UPDATE prover_jobs_fri + SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() + WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'failed' AND attempts < $2) + RETURNING id, status, attempts + ", + &processing_timeout, + max_attempts as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() + } + } + + #[allow(clippy::too_many_arguments)] + pub async fn insert_prover_job( + &mut self, + l1_batch_number: L1BatchNumber, + circuit_id: u8, + depth: u16, + sequence_number: usize, + aggregation_round: AggregationRound, + circuit_blob_url: &str, + is_node_final_proof: bool, + ) { + sqlx::query!( + " + INSERT INTO prover_jobs_fri (l1_batch_number, circuit_id, circuit_blob_url, aggregation_round, sequence_number, depth, is_node_final_proof, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, 'queued', now(), now()) + ON CONFLICT(l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) + DO UPDATE SET updated_at=now() + ", + l1_batch_number.0 as i64, + circuit_id as i16, + circuit_blob_url, + aggregation_round as i64, + sequence_number as i64, + depth as i32, + 
is_node_final_proof, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_prover_jobs_stats(&mut self) -> HashMap<(u8, u8), JobCountStatistics> { + { + sqlx::query!( + r#" + SELECT COUNT(*) as "count!", circuit_id as "circuit_id!", aggregation_round as "aggregation_round!", status as "status!" + FROM prover_jobs_fri + GROUP BY circuit_id, aggregation_round, status + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.circuit_id, row.aggregation_round, row.status, row.count as usize)) + .fold(HashMap::new(), |mut acc, (circuit_id, aggregation_round, status, value)| { + let stats = acc.entry((circuit_id as u8, aggregation_round as u8)).or_insert(JobCountStatistics { + queued: 0, + in_progress: 0, + failed: 0, + successful: 0, + }); + match status.as_ref() { + "queued" => stats.queued = value, + "in_progress" => stats.in_progress = value, + "failed" => stats.failed = value, + "successful" => stats.successful = value, + _ => (), + } + acc + }) + } + } + + pub async fn min_unproved_l1_batch_number(&mut self) -> HashMap<(u8, u8), L1BatchNumber> { + { + sqlx::query!( + r#" + SELECT MIN(l1_batch_number) as "l1_batch_number!", circuit_id, aggregation_round + FROM prover_jobs_fri + WHERE status IN('queued', 'in_progress', 'failed') + GROUP BY circuit_id, aggregation_round + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + ( + (row.circuit_id as u8, row.aggregation_round as u8), + L1BatchNumber(row.l1_batch_number as u32), + ) + }) + .collect() + } + } +} diff --git a/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs b/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs new file mode 100644 index 000000000000..3844f5777cec --- /dev/null +++ b/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs @@ -0,0 +1,114 @@ +use crate::StorageProcessor; +use zksync_types::L1BatchNumber; + +#[derive(Debug)] +pub struct FriSchedulerDependencyTrackerDal<'a, 'c> { + pub storage: &'a mut StorageProcessor<'c>, +} + +impl FriSchedulerDependencyTrackerDal<'_, '_> { + pub async fn get_l1_batches_ready_for_queuing(&mut self) -> Vec { + sqlx::query!( + r#" + UPDATE scheduler_dependency_tracker_fri + SET status='queuing' + WHERE l1_batch_number IN + (SELECT l1_batch_number FROM scheduler_dependency_tracker_fri + WHERE status != 'queued' + AND circuit_1_final_prover_job_id IS NOT NULL + AND circuit_2_final_prover_job_id IS NOT NULL + AND circuit_3_final_prover_job_id IS NOT NULL + AND circuit_4_final_prover_job_id IS NOT NULL + AND circuit_5_final_prover_job_id IS NOT NULL + AND circuit_6_final_prover_job_id IS NOT NULL + AND circuit_7_final_prover_job_id IS NOT NULL + AND circuit_8_final_prover_job_id IS NOT NULL + AND circuit_9_final_prover_job_id IS NOT NULL + AND circuit_10_final_prover_job_id IS NOT NULL + AND circuit_11_final_prover_job_id IS NOT NULL + AND circuit_12_final_prover_job_id IS NOT NULL + AND circuit_13_final_prover_job_id IS NOT NULL + ) + RETURNING l1_batch_number; + "#, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| row.l1_batch_number) + .collect() + } + + pub async fn mark_l1_batches_queued(&mut self, l1_batches: Vec) { + sqlx::query!( + r#" + UPDATE scheduler_dependency_tracker_fri + SET status='queued' + WHERE l1_batch_number = ANY($1) + "#, + &l1_batches[..] 
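`get_prover_jobs_stats` above turns the grouped status rows into a nested map by folding: the key is `(circuit_id, aggregation_round)` and each status row fills one field of `JobCountStatistics`. The same fold, isolated as a hedged sketch with simplified types:

use std::collections::HashMap;

// Simplified fold: rows of (key, status, count) into per-key counters.
// `Counts` stands in for the `JobCountStatistics` struct used above.
#[derive(Default)]
struct Counts {
    queued: usize,
    in_progress: usize,
    failed: usize,
    successful: usize,
}

fn fold_stats(rows: Vec<((u8, u8), String, usize)>) -> HashMap<(u8, u8), Counts> {
    rows.into_iter().fold(HashMap::new(), |mut acc, (key, status, count)| {
        let stats = acc.entry(key).or_default();
        match status.as_str() {
            "queued" => stats.queued = count,
            "in_progress" => stats.in_progress = count,
            "failed" => stats.failed = count,
            "successful" => stats.successful = count,
            _ => (),
        }
        acc
    })
}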
+ ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn set_final_prover_job_id_for_l1_batch( + &mut self, + circuit_id: u8, + final_prover_job_id: u32, + l1_batch_number: L1BatchNumber, + ) { + let query = format!( + r#" + UPDATE scheduler_dependency_tracker_fri + SET circuit_{}_final_prover_job_id = $1 + WHERE l1_batch_number = $2 + "#, + circuit_id + ); + sqlx::query(&query) + .bind(final_prover_job_id as i64) + .bind(l1_batch_number.0 as i64) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn get_final_prover_job_ids_for( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> [u32; 13] { + sqlx::query!( + r#" + SELECT * FROM scheduler_dependency_tracker_fri + WHERE l1_batch_number = $1 + "#, + l1_batch_number.0 as i64, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .next() + .map(|row| { + [ + row.circuit_1_final_prover_job_id.unwrap() as u32, + row.circuit_2_final_prover_job_id.unwrap() as u32, + row.circuit_3_final_prover_job_id.unwrap() as u32, + row.circuit_4_final_prover_job_id.unwrap() as u32, + row.circuit_5_final_prover_job_id.unwrap() as u32, + row.circuit_6_final_prover_job_id.unwrap() as u32, + row.circuit_7_final_prover_job_id.unwrap() as u32, + row.circuit_8_final_prover_job_id.unwrap() as u32, + row.circuit_9_final_prover_job_id.unwrap() as u32, + row.circuit_10_final_prover_job_id.unwrap() as u32, + row.circuit_11_final_prover_job_id.unwrap() as u32, + row.circuit_12_final_prover_job_id.unwrap() as u32, + row.circuit_13_final_prover_job_id.unwrap() as u32, + ] + }) + .unwrap() + } +} diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs new file mode 100644 index 000000000000..ca88abbcbc6b --- /dev/null +++ b/core/lib/dal/src/fri_witness_generator_dal.rs @@ -0,0 +1,720 @@ +use sqlx::Row; +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +use zksync_types::proofs::{ + AggregationRound, JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, + StuckJobs, +}; +use zksync_types::L1BatchNumber; + +use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; +use crate::StorageProcessor; + +#[derive(Debug)] +pub struct FriWitnessGeneratorDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] +pub enum FriWitnessJobStatus { + #[strum(serialize = "failed")] + Failed, + #[strum(serialize = "skipped")] + Skipped, + #[strum(serialize = "successful")] + Successful, + #[strum(serialize = "in_progress")] + InProgress, + #[strum(serialize = "queued")] + Queued, +} + +impl FriWitnessGeneratorDal<'_, '_> { + pub async fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { + { + sqlx::query!( + "INSERT INTO witness_inputs_fri(l1_batch_number, merkle_tree_paths_blob_url, status, created_at, updated_at) \ + VALUES ($1, $2, 'queued', now(), now()) + ON CONFLICT (l1_batch_number) DO NOTHING", + block_number.0 as i64, + object_key, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + } + } + + pub async fn get_next_basic_circuit_witness_job( + &mut self, + last_l1_batch_to_process: u32, + ) -> Option { + let result: Option = sqlx::query!( + " + UPDATE witness_inputs_fri + SET status = 'in_progress', attempts = attempts + 1, + updated_at = now(), processing_started_at = now() + WHERE l1_batch_number = ( + SELECT l1_batch_number + FROM witness_inputs_fri + WHERE l1_batch_number <= $1 + AND 
status = 'queued' + ORDER BY l1_batch_number ASC + LIMIT 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING witness_inputs_fri.* + ", + last_l1_batch_to_process as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + result + } + + pub async fn mark_witness_job( + &mut self, + status: FriWitnessJobStatus, + block_number: L1BatchNumber, + ) { + sqlx::query!( + " + UPDATE witness_inputs_fri SET status =$1, updated_at = now() + WHERE l1_batch_number = $2 + ", + format!("{}", status), + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_witness_job_as_successful( + &mut self, + block_number: L1BatchNumber, + time_taken: Duration, + ) { + sqlx::query!( + " + UPDATE witness_inputs_fri + SET status = 'successful', updated_at = now(), time_taken = $1 + WHERE l1_batch_number = $2 + ", + duration_to_naive_time(time_taken), + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_witness_job_failed(&mut self, error: &str, block_number: L1BatchNumber) { + sqlx::query!( + " + UPDATE witness_inputs_fri SET status ='failed', error= $1, updated_at = now() + WHERE l1_batch_number = $2 + ", + error, + block_number.0 as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_leaf_aggregation_job_failed(&mut self, error: &str, id: u32) { + sqlx::query!( + " + UPDATE leaf_aggregation_witness_jobs_fri + SET status ='failed', error= $1, updated_at = now() + WHERE id = $2 + ", + error, + id as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn mark_leaf_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) { + sqlx::query!( + " + UPDATE leaf_aggregation_witness_jobs_fri + SET status = 'successful', updated_at = now(), time_taken = $1 + WHERE id = $2 + ", + duration_to_naive_time(time_taken), + id as i64 + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn requeue_stuck_jobs( + &mut self, + processing_timeout: Duration, + max_attempts: u32, + ) -> Vec { + let processing_timeout = pg_interval_from_duration(processing_timeout); + sqlx::query!( + " + UPDATE witness_inputs_fri + SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() + WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'failed' AND attempts < $2) + RETURNING l1_batch_number, status, attempts + ", + &processing_timeout, + max_attempts as i32, + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { id: row.l1_batch_number as u64, status: row.status, attempts: row.attempts as u64 }) + .collect() + } + + pub async fn create_aggregation_jobs( + &mut self, + block_number: L1BatchNumber, + closed_form_inputs_and_urls: &Vec<(u8, String, usize)>, + scheduler_partial_input_blob_url: &str, + base_layer_to_recursive_layer_circuit_id: fn(u8) -> u8, + ) { + { + let started_at = Instant::now(); + for (circuit_id, closed_form_inputs_url, number_of_basic_circuits) in + closed_form_inputs_and_urls + { + sqlx::query!( + " + INSERT INTO leaf_aggregation_witness_jobs_fri + (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at) + VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now()) + ON 
+    pub async fn create_aggregation_jobs(
+        &mut self,
+        block_number: L1BatchNumber,
+        closed_form_inputs_and_urls: &Vec<(u8, String, usize)>,
+        scheduler_partial_input_blob_url: &str,
+        base_layer_to_recursive_layer_circuit_id: fn(u8) -> u8,
+    ) {
+        {
+            let started_at = Instant::now();
+            for (circuit_id, closed_form_inputs_url, number_of_basic_circuits) in
+                closed_form_inputs_and_urls
+            {
+                sqlx::query!(
+                    "
+                    INSERT INTO leaf_aggregation_witness_jobs_fri
+                        (l1_batch_number, circuit_id, closed_form_inputs_blob_url, number_of_basic_circuits, status, created_at, updated_at)
+                    VALUES ($1, $2, $3, $4, 'waiting_for_proofs', now(), now())
+                    ON CONFLICT(l1_batch_number, circuit_id)
+                    DO UPDATE SET updated_at=now()
+                    ",
+                    block_number.0 as i64,
+                    *circuit_id as i16,
+                    closed_form_inputs_url,
+                    *number_of_basic_circuits as i32,
+                )
+                .execute(self.storage.conn())
+                .await
+                .unwrap();
+                self.insert_node_aggregation_jobs(
+                    block_number,
+                    base_layer_to_recursive_layer_circuit_id(*circuit_id),
+                    None,
+                    0,
+                    "",
+                )
+                .await;
+            }
+
+            sqlx::query!(
+                "
+                INSERT INTO scheduler_witness_jobs_fri
+                    (l1_batch_number, scheduler_partial_input_blob_url, status, created_at, updated_at)
+                VALUES ($1, $2, 'waiting_for_proofs', now(), now())
+                ON CONFLICT(l1_batch_number)
+                DO UPDATE SET updated_at=now()
+                ",
+                block_number.0 as i64,
+                scheduler_partial_input_blob_url,
+            )
+            .execute(self.storage.conn())
+            .await
+            .unwrap();
+
+            sqlx::query!(
+                "
+                INSERT INTO scheduler_dependency_tracker_fri
+                    (l1_batch_number, status, created_at, updated_at)
+                VALUES ($1, 'waiting_for_proofs', now(), now())
+                ON CONFLICT(l1_batch_number)
+                DO UPDATE SET updated_at=now()
+                ",
+                block_number.0 as i64,
+            )
+            .execute(self.storage.conn())
+            .await
+            .unwrap();
+
+            metrics::histogram!("dal.request", started_at.elapsed(), "method" => "create_aggregation_jobs_fri");
+        }
+    }
+
+    pub async fn get_next_leaf_aggregation_job(&mut self) -> Option<LeafAggregationJobMetadata> {
+        let row = sqlx::query!(
+            "
+            UPDATE leaf_aggregation_witness_jobs_fri
+            SET status = 'in_progress', attempts = attempts + 1,
+                updated_at = now(), processing_started_at = now()
+            WHERE id = (
+                SELECT id
+                FROM leaf_aggregation_witness_jobs_fri
+                WHERE status = 'queued'
+                ORDER BY l1_batch_number ASC, id ASC
+                LIMIT 1
+                FOR UPDATE
+                SKIP LOCKED
+            )
+            RETURNING leaf_aggregation_witness_jobs_fri.*
+            ",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()?;
+        let block_number = L1BatchNumber(row.l1_batch_number as u32);
+        let proof_job_ids = self
+            .prover_job_ids_for(
+                block_number,
+                row.circuit_id as u8,
+                AggregationRound::BasicCircuits,
+                0,
+            )
+            .await;
+        Some(LeafAggregationJobMetadata {
+            id: row.id as u32,
+            block_number,
+            circuit_id: row.circuit_id as u8,
+            prover_job_ids_for_proofs: proof_job_ids,
+        })
+    }
+
+    async fn prover_job_ids_for(
+        &mut self,
+        block_number: L1BatchNumber,
+        circuit_id: u8,
+        round: AggregationRound,
+        depth: u16,
+    ) -> Vec<u32> {
+        sqlx::query!(
+            "
+            SELECT id from prover_jobs_fri
+            WHERE l1_batch_number = $1
+                AND circuit_id = $2
+                AND aggregation_round = $3
+                AND depth = $4
+                AND status = 'successful'
+            ORDER BY sequence_number ASC;
+            ",
+            block_number.0 as i64,
+            circuit_id as i16,
+            round as i16,
+            depth as i32,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| row.id as u32)
+        .collect::<_>()
+    }
+
+    pub async fn move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<(i64, u8)> {
+        sqlx::query!(
+            r#"
+            UPDATE leaf_aggregation_witness_jobs_fri
+            SET status='queued'
+            WHERE (l1_batch_number, circuit_id) IN
+                (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id
+                FROM prover_jobs_fri
+                JOIN leaf_aggregation_witness_jobs_fri lawj ON
+                    prover_jobs_fri.l1_batch_number = lawj.l1_batch_number
+                    AND prover_jobs_fri.circuit_id = lawj.circuit_id
+                WHERE lawj.status = 'waiting_for_proofs'
+                    AND prover_jobs_fri.status = 'successful'
+                    AND prover_jobs_fri.aggregation_round = 0
+                GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, lawj.number_of_basic_circuits
+                HAVING COUNT(*) = lawj.number_of_basic_circuits)
+            RETURNING l1_batch_number, circuit_id;
+            "#,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| (row.l1_batch_number, row.circuit_id as u8))
+        .collect()
+    }
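The `GROUP BY ... HAVING COUNT(*) = lawj.number_of_basic_circuits` query above promotes a leaf-aggregation job from `waiting_for_proofs` to `queued` only once every basic-circuit proof it depends on has succeeded. A minimal sketch of a poller driving that promotion (the interval is an assumption, not from the patch):

async fn poll_leaf_aggregation_readiness(pool: zksync_dal::ConnectionPool) {
    loop {
        // Promote every leaf-aggregation job whose dependency set is complete.
        let promoted = pool
            .access_storage()
            .await
            .fri_witness_generator_dal()
            .move_leaf_aggregation_jobs_from_waiting_to_queued()
            .await;
        for (l1_batch_number, circuit_id) in promoted {
            vlog::info!(
                "leaf aggregation job for batch {} circuit {} is now queued",
                l1_batch_number,
                circuit_id
            );
        }
        tokio::time::sleep(std::time::Duration::from_secs(5)).await;
    }
}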
+    pub async fn update_node_aggregation_jobs_url(
+        &mut self,
+        block_number: L1BatchNumber,
+        circuit_id: u8,
+        number_of_dependent_jobs: usize,
+        depth: u16,
+        url: String,
+    ) {
+        sqlx::query!(
+            "
+            UPDATE node_aggregation_witness_jobs_fri
+            SET aggregations_url = $1, number_of_dependent_jobs = $5, updated_at = now()
+            WHERE l1_batch_number = $2
+                AND circuit_id = $3
+                AND depth = $4
+            ",
+            url,
+            block_number.0 as i64,
+            circuit_id as i16,
+            depth as i32,
+            number_of_dependent_jobs as i32,
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn get_next_node_aggregation_job(&mut self) -> Option<NodeAggregationJobMetadata> {
+        let row = sqlx::query!(
+            "
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status = 'in_progress', attempts = attempts + 1,
+                updated_at = now(), processing_started_at = now()
+            WHERE id = (
+                SELECT id
+                FROM node_aggregation_witness_jobs_fri
+                WHERE status = 'queued'
+                ORDER BY l1_batch_number ASC, depth ASC, id ASC
+                LIMIT 1
+                FOR UPDATE
+                SKIP LOCKED
+            )
+            RETURNING node_aggregation_witness_jobs_fri.*
+            ",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()?;
+        let depth = row.depth as u16;
+
+        let round = match depth {
+            // Zero depth means this is the first node aggregation pass,
+            // i.e., proofs are loaded from the previous round, which is leaf aggregation.
+            0 => AggregationRound::LeafAggregation,
+            _ => AggregationRound::NodeAggregation,
+        };
+
+        let block_number = L1BatchNumber(row.l1_batch_number as u32);
+        let prover_job_ids = self
+            .prover_job_ids_for(block_number, row.circuit_id as u8, round, depth)
+            .await;
+        Some(NodeAggregationJobMetadata {
+            id: row.id as u32,
+            block_number,
+            circuit_id: row.circuit_id as u8,
+            depth,
+            prover_job_ids_for_proofs: prover_job_ids,
+        })
+    }
+
+    pub async fn mark_node_aggregation_job_failed(&mut self, error: &str, id: u32) {
+        sqlx::query!(
+            "
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status ='failed', error= $1, updated_at = now()
+            WHERE id = $2
+            ",
+            error,
+            id as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn mark_node_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) {
+        sqlx::query!(
+            "
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status = 'successful', updated_at = now(), time_taken = $1
+            WHERE id = $2
+            ",
+            duration_to_naive_time(time_taken),
+            id as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn insert_node_aggregation_jobs(
+        &mut self,
+        block_number: L1BatchNumber,
+        circuit_id: u8,
+        number_of_dependent_jobs: Option<i32>,
+        depth: u16,
+        aggregations_url: &str,
+    ) {
+        sqlx::query!(
+            "INSERT INTO node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, depth, aggregations_url, number_of_dependent_jobs, status, created_at, updated_at)
+             VALUES ($1, $2, $3, $4, $5, 'waiting_for_proofs', now(), now())
+             ON CONFLICT(l1_batch_number, circuit_id, depth)
+             DO UPDATE SET updated_at=now()",
+            block_number.0 as i64,
+            circuit_id as i16,
+            depth as i32,
+            aggregations_url,
+            number_of_dependent_jobs
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn move_depth_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> {
+        sqlx::query!(
+            r#"
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status='queued'
+            WHERE (l1_batch_number, circuit_id, depth) IN
+                (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth
+                FROM prover_jobs_fri
+                JOIN node_aggregation_witness_jobs_fri nawj ON
+                    prover_jobs_fri.l1_batch_number = nawj.l1_batch_number
+                    AND prover_jobs_fri.circuit_id = nawj.circuit_id
+                    AND prover_jobs_fri.depth = nawj.depth
+                WHERE nawj.status = 'waiting_for_proofs'
+                    AND prover_jobs_fri.status = 'successful'
+                    AND prover_jobs_fri.aggregation_round = 1
+                    AND prover_jobs_fri.depth = 0
+                GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs
+                HAVING COUNT(*) = nawj.number_of_dependent_jobs)
+            RETURNING l1_batch_number, circuit_id, depth;
+            "#,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16))
+        .collect()
+    }
+
+    pub async fn move_depth_non_zero_node_aggregation_jobs(&mut self) -> Vec<(i64, u8, u16)> {
+        sqlx::query!(
+            r#"
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status='queued'
+            WHERE (l1_batch_number, circuit_id, depth) IN
+                (SELECT prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth
+                FROM prover_jobs_fri
+                JOIN node_aggregation_witness_jobs_fri nawj ON
+                    prover_jobs_fri.l1_batch_number = nawj.l1_batch_number
+                    AND prover_jobs_fri.circuit_id = nawj.circuit_id
+                    AND prover_jobs_fri.depth = nawj.depth
+                WHERE nawj.status = 'waiting_for_proofs'
+                    AND prover_jobs_fri.status = 'successful'
+                    AND prover_jobs_fri.aggregation_round = 2
+                GROUP BY prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id, prover_jobs_fri.depth, nawj.number_of_dependent_jobs
+                HAVING COUNT(*) = nawj.number_of_dependent_jobs)
+            RETURNING l1_batch_number, circuit_id, depth;
+            "#,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| (row.l1_batch_number, row.circuit_id as u8, row.depth as u16))
+        .collect()
+    }
+
+    pub async fn requeue_stuck_leaf_aggregations_jobs(
+        &mut self,
+        processing_timeout: Duration,
+        max_attempts: u32,
+    ) -> Vec<StuckJobs> {
+        let processing_timeout = pg_interval_from_duration(processing_timeout);
+        sqlx::query!(
+            "
+            UPDATE leaf_aggregation_witness_jobs_fri
+            SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()
+            WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)
+            OR (status = 'failed' AND attempts < $2)
+            RETURNING id, status, attempts
+            ",
+            &processing_timeout,
+            max_attempts as i32,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 })
+        .collect()
+    }
+
+    pub async fn requeue_stuck_node_aggregations_jobs(
+        &mut self,
+        processing_timeout: Duration,
+        max_attempts: u32,
+    ) -> Vec<StuckJobs> {
+        let processing_timeout = pg_interval_from_duration(processing_timeout);
+        sqlx::query!(
+            "
+            UPDATE node_aggregation_witness_jobs_fri
+            SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()
+            WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)
+            OR (status = 'failed' AND attempts < $2)
+            RETURNING id, status, attempts
+            ",
+            &processing_timeout,
+            max_attempts as i32,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| StuckJobs { id: row.id as u64, status: row.status, attempts: row.attempts as u64 })
+        .collect()
+    }
+
+    pub async fn mark_scheduler_jobs_as_queued(&mut self, l1_batch_number: i64) {
+        sqlx::query!(
+            r#"
+            UPDATE scheduler_witness_jobs_fri
+            SET status='queued'
+            WHERE l1_batch_number = $1
+                AND status != 'successful'
+                AND status != 'in_progress'
+            "#,
+            l1_batch_number
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+    pub async fn requeue_stuck_scheduler_jobs(
+        &mut self,
+        processing_timeout: Duration,
+        max_attempts: u32,
+    ) -> Vec<StuckJobs> {
+        let processing_timeout = pg_interval_from_duration(processing_timeout);
+        sqlx::query!(
+            "
+            UPDATE scheduler_witness_jobs_fri
+            SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()
+            WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)
+            OR (status = 'failed' AND attempts < $2)
+            RETURNING l1_batch_number, status, attempts
+            ",
+            &processing_timeout,
+            max_attempts as i32,
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| StuckJobs { id: row.l1_batch_number as u64, status: row.status, attempts: row.attempts as u64 })
+        .collect()
+    }
+
+    pub async fn get_next_scheduler_witness_job(&mut self) -> Option<L1BatchNumber> {
+        let result: Option<L1BatchNumber> = sqlx::query!(
+            "
+            UPDATE scheduler_witness_jobs_fri
+            SET status = 'in_progress', attempts = attempts + 1,
+                updated_at = now(), processing_started_at = now()
+            WHERE l1_batch_number = (
+                SELECT l1_batch_number
+                FROM scheduler_witness_jobs_fri
+                WHERE status = 'queued'
+                ORDER BY l1_batch_number ASC
+                LIMIT 1
+                FOR UPDATE
+                SKIP LOCKED
+            )
+            RETURNING scheduler_witness_jobs_fri.*
+            ",
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| L1BatchNumber(row.l1_batch_number as u32));
+        result
+    }
+
+    pub async fn mark_scheduler_job_as_successful(
+        &mut self,
+        block_number: L1BatchNumber,
+        time_taken: Duration,
+    ) {
+        sqlx::query!(
+            "
+            UPDATE scheduler_witness_jobs_fri
+            SET status = 'successful', updated_at = now(), time_taken = $1
+            WHERE l1_batch_number = $2
+            ",
+            duration_to_naive_time(time_taken),
+            block_number.0 as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn mark_scheduler_job_failed(&mut self, error: &str, block_number: L1BatchNumber) {
+        sqlx::query!(
+            "
+            UPDATE scheduler_witness_jobs_fri
+            SET status ='failed', error= $1, updated_at = now()
+            WHERE l1_batch_number = $2
+            ",
+            error,
+            block_number.0 as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+    }
+
+    pub async fn get_witness_jobs_stats(
+        &mut self,
+        aggregation_round: AggregationRound,
+    ) -> JobCountStatistics {
+        let table_name = Self::input_table_name_for(aggregation_round);
+        let sql = format!(
+            r#"
+            SELECT COUNT(*) as "count", status as "status"
+            FROM {}
+            GROUP BY status
+            "#,
+            table_name
+        );
+        let mut results: HashMap<String, i64> = sqlx::query(&sql)
+            .fetch_all(self.storage.conn())
+            .await
+            .unwrap()
+            .into_iter()
+            .map(|row| (row.get("status"), row.get::<i64, &str>("count")))
+            .collect::<HashMap<String, i64>>();
+
+        JobCountStatistics {
+            queued: results.remove("queued").unwrap_or(0i64) as usize,
+            in_progress: results.remove("in_progress").unwrap_or(0i64) as usize,
+            failed: results.remove("failed").unwrap_or(0i64) as usize,
+            successful: results.remove("successful").unwrap_or(0i64) as usize,
+        }
+    }
+
+    fn input_table_name_for(aggregation_round: AggregationRound) -> &'static str {
+        match aggregation_round {
+            AggregationRound::BasicCircuits => "witness_inputs_fri",
+            AggregationRound::LeafAggregation => "leaf_aggregation_witness_jobs_fri",
+            AggregationRound::NodeAggregation => "node_aggregation_witness_jobs_fri",
+            AggregationRound::Scheduler => "scheduler_witness_jobs_fri",
+        }
+    }
+}
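Sketch only: `get_witness_jobs_stats` folds the per-status row counts into a `JobCountStatistics`, which a monitoring task could export as gauges, mirroring the `metrics::histogram!` calls used elsewhere in this patch. The metric name and the assumption that `AggregationRound` implements `Debug` are illustrative, not part of the change.

async fn report_witness_queue_sizes(pool: zksync_dal::ConnectionPool) {
    use zksync_types::proofs::AggregationRound;

    let rounds = [
        AggregationRound::BasicCircuits,
        AggregationRound::LeafAggregation,
        AggregationRound::NodeAggregation,
        AggregationRound::Scheduler,
    ];
    for round in rounds {
        let stats = pool
            .access_storage()
            .await
            .fri_witness_generator_dal()
            .get_witness_jobs_stats(round)
            .await;
        // One gauge per round; `stats.in_progress` etc. could be exported the same way.
        metrics::gauge!(
            "fri.witness_generator.queued_jobs",
            stats.queued as f64,
            "round" => format!("{:?}", round)
        );
    }
}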
"scheduler_witness_jobs_fri", + } + } +} diff --git a/core/lib/dal/src/gpu_prover_queue_dal.rs b/core/lib/dal/src/gpu_prover_queue_dal.rs index 083b4d0cb483..55a998ee5c35 100644 --- a/core/lib/dal/src/gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/gpu_prover_queue_dal.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; #[derive(Debug)] pub struct GpuProverQueueDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } #[derive(Debug, Clone)] @@ -29,14 +29,14 @@ pub enum GpuProverInstanceStatus { } impl GpuProverQueueDal<'_, '_> { - pub fn lock_available_prover( + pub async fn lock_available_prover( &mut self, processing_timeout: Duration, specialized_prover_group_id: u8, region: String, zone: String, ) -> Option { - async_std::task::block_on(async { + { let processing_timeout = pg_interval_from_duration(processing_timeout); let result: Option = sqlx::query!( " @@ -75,10 +75,10 @@ impl GpuProverQueueDal<'_, '_> { }); result - }) + } } - pub fn insert_prover_instance( + pub async fn insert_prover_instance( &mut self, address: SocketAddress, queue_capacity: usize, @@ -87,7 +87,7 @@ impl GpuProverQueueDal<'_, '_> { zone: String, num_gpu: u8, ) { - async_std::task::block_on(async { + { sqlx::query!( " INSERT INTO gpu_prover_queue (instance_host, instance_port, queue_capacity, queue_free_slots, instance_status, specialized_prover_group_id, region, zone, num_gpu, created_at, updated_at) @@ -104,10 +104,10 @@ impl GpuProverQueueDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn update_prover_instance_status( + pub async fn update_prover_instance_status( &mut self, address: SocketAddress, status: GpuProverInstanceStatus, @@ -115,7 +115,7 @@ impl GpuProverQueueDal<'_, '_> { region: String, zone: String, ) { - async_std::task::block_on(async { + { sqlx::query!( " UPDATE gpu_prover_queue @@ -135,17 +135,17 @@ impl GpuProverQueueDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn update_prover_instance_from_full_to_available( + pub async fn update_prover_instance_from_full_to_available( &mut self, address: SocketAddress, queue_free_slots: usize, region: String, zone: String, ) { - async_std::task::block_on(async { + { sqlx::query!( " UPDATE gpu_prover_queue @@ -165,11 +165,11 @@ impl GpuProverQueueDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_prover_gpu_count_per_region_zone(&mut self) -> HashMap<(String, String), u64> { - async_std::task::block_on(async { + pub async fn get_prover_gpu_count_per_region_zone(&mut self) -> HashMap<(String, String), u64> { + { sqlx::query!( r#" SELECT region, zone, SUM(num_gpu) AS total_gpus @@ -183,6 +183,6 @@ impl GpuProverQueueDal<'_, '_> { .into_iter() .map(|row| ((row.region, row.zone), row.total_gpus.unwrap() as u64)) .collect() - }) + } } } diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/dal/src/healthcheck.rs index dfc10c09d068..48ffe001a9b6 100644 --- a/core/lib/dal/src/healthcheck.rs +++ b/core/lib/dal/src/healthcheck.rs @@ -15,11 +15,12 @@ impl ConnectionPoolHealthCheck { } } +#[zksync_health_check::async_trait] impl CheckHealth for ConnectionPoolHealthCheck { - fn check_health(&self) -> CheckHealthStatus { + async fn check_health(&self) -> CheckHealthStatus { // This check is rather feeble, plan to make reliable here: // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check - let _ = self.connection_pool.access_storage_blocking(); + let _ = 
diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/dal/src/healthcheck.rs
index dfc10c09d068..48ffe001a9b6 100644
--- a/core/lib/dal/src/healthcheck.rs
+++ b/core/lib/dal/src/healthcheck.rs
@@ -15,11 +15,12 @@ impl ConnectionPoolHealthCheck {
     }
 }
 
+#[zksync_health_check::async_trait]
 impl CheckHealth for ConnectionPoolHealthCheck {
-    fn check_health(&self) -> CheckHealthStatus {
+    async fn check_health(&self) -> CheckHealthStatus {
         // This check is rather feeble, plan to make reliable here:
         // https://linear.app/matterlabs/issue/PLA-255/revamp-db-connection-health-check
-        let _ = self.connection_pool.access_storage_blocking();
+        let _ = self.connection_pool.access_storage().await;
         CheckHealthStatus::Ready
     }
 }
diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs
index d8cf27e7218f..9d8f4066849d 100644
--- a/core/lib/dal/src/lib.rs
+++ b/core/lib/dal/src/lib.rs
@@ -3,7 +3,6 @@
 use std::env;
 
 // Built-in deps
-use async_std::task::block_on;
 pub use sqlx::Error as SqlxError;
 use sqlx::{postgres::Postgres, Connection, PgConnection, Transaction};
 // External imports
@@ -18,21 +17,25 @@ use crate::connection::{holder::ConnectionHolder, test_pool::TestPoolLock};
 use crate::eth_sender_dal::EthSenderDal;
 use crate::events_dal::EventsDal;
 use crate::events_web3_dal::EventsWeb3Dal;
-use crate::explorer::ExplorerIntermediator;
-use crate::fee_monitor_dal::FeeMonitorDal;
+use crate::explorer::ExplorerIntermediary;
+use crate::fri_prover_dal::FriProverDal;
+use crate::fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal;
+use crate::fri_witness_generator_dal::FriWitnessGeneratorDal;
 use crate::gpu_prover_queue_dal::GpuProverQueueDal;
 use crate::prover_dal::ProverDal;
 use crate::storage_dal::StorageDal;
-use crate::storage_load_dal::StorageLoadDal;
 use crate::storage_logs_dal::StorageLogsDal;
 use crate::storage_logs_dedup_dal::StorageLogsDedupDal;
 use crate::storage_web3_dal::StorageWeb3Dal;
+use crate::sync_dal::SyncDal;
 use crate::tokens_dal::TokensDal;
 use crate::tokens_web3_dal::TokensWeb3Dal;
 use crate::transactions_dal::TransactionsDal;
 use crate::transactions_web3_dal::TransactionsWeb3Dal;
 use crate::witness_generator_dal::WitnessGeneratorDal;
 
+#[macro_use]
+mod macro_utils;
 pub mod blocks_dal;
 pub mod blocks_web3_dal;
 pub mod connection;
@@ -40,16 +43,18 @@ pub mod eth_sender_dal;
 pub mod events_dal;
 pub mod events_web3_dal;
 pub mod explorer;
-pub mod fee_monitor_dal;
+pub mod fri_prover_dal;
+pub mod fri_scheduler_dependency_tracker_dal;
+pub mod fri_witness_generator_dal;
 pub mod gpu_prover_queue_dal;
 pub mod healthcheck;
 mod models;
 pub mod prover_dal;
 pub mod storage_dal;
-pub mod storage_load_dal;
 pub mod storage_logs_dal;
 pub mod storage_logs_dedup_dal;
 pub mod storage_web3_dal;
+pub mod sync_dal;
 pub mod time_utils;
 pub mod tokens_dal;
 pub mod tokens_web3_dal;
@@ -65,6 +70,11 @@
 pub fn get_master_database_url() -> String {
     env::var("DATABASE_URL").expect("DATABASE_URL must be set")
 }
 
+/// Obtains the master prover database URL from the environment variable.
+pub fn get_prover_database_url() -> String {
+    env::var("DATABASE_PROVER_URL").unwrap_or_else(|_| get_master_database_url())
+}
+
 /// Obtains the replica database URL from the environment variable.
 pub fn get_replica_database_url() -> String {
     env::var("DATABASE_REPLICA_URL").unwrap_or_else(|_| get_master_database_url())
@@ -85,13 +95,7 @@ pub struct StorageProcessor<'a> {
 }
 
 impl<'a> StorageProcessor<'a> {
-    /// WARNING: this method is intentionally private.
-    /// `zksync_dal` crate uses `async-std` runtime, whereas most of our crates use `tokio`.
-    /// Calling `async-std` future from `tokio` context may cause deadlocks (and it did happen).
-    /// Use blocking counterpart instead.
-    ///
-    /// Creates a `StorageProcessor` using an unique sole connection to the database.
-    async fn establish_connection(connect_to_master: bool) -> StorageProcessor<'static> {
+    pub async fn establish_connection(connect_to_master: bool) -> StorageProcessor<'static> {
         let database_url = if connect_to_master {
             get_master_database_url()
         } else {
@@ -104,16 +108,7 @@ impl<'a> StorageProcessor<'a> {
         }
     }
 
-    /// Creates a `StorageProcessor` using an unique sole connection to the database.
-    pub fn establish_connection_blocking(connect_to_master: bool) -> StorageProcessor<'static> {
-        block_on(Self::establish_connection(connect_to_master))
-    }
-
-    /// WARNING: this method is intentionally private.
-    /// `zksync_dal` crate uses `async-std` runtime, whereas most of our crates use `tokio`.
-    /// Calling `async-std` future from `tokio` context may cause deadlocks (and it did happen).
-    /// Use blocking counterpart instead.
-    async fn start_transaction<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> {
+    pub async fn start_transaction<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> {
         let transaction = self.conn().begin().await.unwrap();
 
         let mut processor = StorageProcessor::from_transaction(transaction);
@@ -122,10 +117,6 @@ impl<'a> StorageProcessor<'a> {
         processor
     }
 
-    pub fn start_transaction_blocking<'c: 'b, 'b>(&'c mut self) -> StorageProcessor<'b> {
-        block_on(self.start_transaction())
-    }
-
     /// Checks if the `StorageProcessor` is currently within database transaction.
     pub fn in_transaction(&self) -> bool {
         self.in_transaction
@@ -145,12 +136,7 @@ impl<'a> StorageProcessor<'a> {
         }
     }
 
-    /// WARNING: this method is intentionally private.
-    /// `zksync_dal` crate uses `async-std` runtime, whereas most of our crates use `tokio`.
-    /// Calling `async-std` future from `tokio` context may cause deadlocks (and it did happen).
-    /// Use blocking counterpart instead.
-    ///
-    async fn commit(self) {
+    pub async fn commit(self) {
         if let ConnectionHolder::Transaction(transaction) = self.conn {
             transaction.commit().await.unwrap();
         } else {
@@ -158,10 +144,6 @@ impl<'a> StorageProcessor<'a> {
         }
     }
 
-    pub fn commit_blocking(self) {
-        block_on(self.commit())
-    }
-
     /// Creates a `StorageProcessor` using a pool of connections.
     /// This method borrows one of the connections from the pool, and releases it
     /// after `drop`.
@@ -225,10 +207,6 @@ impl<'a> StorageProcessor<'a> {
         StorageLogsDedupDal { storage: self }
     }
 
-    pub fn storage_load_dal(&mut self) -> StorageLoadDal<'_, 'a> {
-        StorageLoadDal { storage: self }
-    }
-
     pub fn tokens_dal(&mut self) -> TokensDal<'_, 'a> {
         TokensDal { storage: self }
     }
@@ -245,15 +223,29 @@ impl<'a> StorageProcessor<'a> {
         WitnessGeneratorDal { storage: self }
     }
 
-    pub fn explorer(&mut self) -> ExplorerIntermediator<'_, 'a> {
-        ExplorerIntermediator { storage: self }
-    }
-
-    pub fn fee_monitor_dal(&mut self) -> FeeMonitorDal<'_, 'a> {
-        FeeMonitorDal { storage: self }
+    pub fn explorer(&mut self) -> ExplorerIntermediary<'_, 'a> {
+        ExplorerIntermediary { storage: self }
     }
 
     pub fn gpu_prover_queue_dal(&mut self) -> GpuProverQueueDal<'_, 'a> {
         GpuProverQueueDal { storage: self }
     }
+
+    pub fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a> {
+        FriWitnessGeneratorDal { storage: self }
+    }
+
+    pub fn fri_prover_jobs_dal(&mut self) -> FriProverDal<'_, 'a> {
+        FriProverDal { storage: self }
+    }
+
+    pub fn sync_dal(&mut self) -> SyncDal<'_, 'a> {
+        SyncDal { storage: self }
+    }
+
+    pub fn fri_scheduler_dependency_tracker_dal(
+        &mut self,
+    ) -> FriSchedulerDependencyTrackerDal<'_, 'a> {
+        FriSchedulerDependencyTrackerDal { storage: self }
+    }
 }
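With `start_transaction` and `commit` now public `async fn`s (the `_blocking` wrappers are gone), a typical multi-DAL unit of work looks like the sketch below; the DAL calls elided in the comment are placeholders.

async fn example_unit_of_work(pool: zksync_dal::ConnectionPool) {
    let mut storage = pool.access_storage().await;
    let mut transaction = storage.start_transaction().await;
    // ... several DAL calls on `transaction` here; they all commit or roll back together ...
    transaction.commit().await; // if `commit` is never called, the sqlx transaction rolls back on drop
}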
diff --git a/core/lib/dal/src/macro_utils.rs b/core/lib/dal/src/macro_utils.rs
new file mode 100644
index 000000000000..cbd76d7e017f
--- /dev/null
+++ b/core/lib/dal/src/macro_utils.rs
@@ -0,0 +1,20 @@
+//! Miscellaneous helper macros.
+
+/// Writes to a [`String`]. This is equivalent to `write!`, but without the need to `unwrap()` the result.
+macro_rules! write_str {
+    ($buffer:expr, $($args:tt)+) => {{
+        use std::fmt::Write as _;
+        let __buffer: &mut std::string::String = $buffer;
+        std::write!(__buffer, $($args)+).unwrap(); // Writing to a string cannot result in an error
+    }};
+}
+
+/// Writes a line to a [`String`]. This is equivalent to `writeln!`, but without the need
+/// to `unwrap()` the result.
+macro_rules! writeln_str {
+    ($buffer:expr, $($args:tt)+) => {{
+        use std::fmt::Write as _;
+        let __buffer: &mut std::string::String = $buffer;
+        std::writeln!(__buffer, $($args)+).unwrap(); // Writing to a string cannot result in an error
+    }};
+}
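Usage sketch for the macros above (assumes the caller is inside this crate, where they are visible via `#[macro_use]`; the query being built is illustrative):

fn render_stats_query(table_name: &str) -> String {
    let mut sql = String::new();
    // Each macro call appends to `sql` without a `.unwrap()` at the call site.
    writeln_str!(&mut sql, "SELECT COUNT(*) as \"count\", status as \"status\"");
    write_str!(&mut sql, "FROM {} GROUP BY status", table_name);
    sql
}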
diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs
index e491bcf5678c..b3f1ab7ad8c9 100644
--- a/core/lib/dal/src/models/mod.rs
+++ b/core/lib/dal/src/models/mod.rs
@@ -6,6 +6,8 @@ pub mod storage_fee_monitor;
 pub mod storage_log;
 pub mod storage_prover_job_info;
 pub mod storage_state_record;
+pub mod storage_sync;
 pub mod storage_token;
 pub mod storage_transaction;
+pub mod storage_verification_request;
 pub mod storage_witness_job_info;
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 2941478269dd..185560d4a0ea 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -9,13 +9,14 @@
 use sqlx::Postgres;
 use thiserror::Error;
 
 use zksync_contracts::BaseSystemContractsHashes;
-use zksync_types::api::{self, BlockId};
+use zksync_types::api;
 use zksync_types::block::MiniblockHeader;
 use zksync_types::commitment::{BlockMetaParameters, BlockMetadata};
 use zksync_types::explorer_api::{BlockDetails, L1BatchDetails, L1BatchPageItem};
 use zksync_types::{
     block::L1BatchHeader,
     explorer_api::{BlockPageItem, BlockStatus},
+    l2_to_l1_log::L2ToL1Log,
     Address, L1BatchNumber, MiniblockNumber, H2048, H256, U256,
 };
 
@@ -95,7 +96,7 @@ impl From<StorageBlock> for L1BatchHeader {
         let l2_to_l1_logs: Vec<_> = block
             .l2_to_l1_logs
             .into_iter()
-            .map(|raw_data| raw_data.into())
+            .map(|raw_log| L2ToL1Log::from_slice(&raw_log))
             .collect();
 
         L1BatchHeader {
@@ -124,11 +125,11 @@ impl From<StorageBlock> for L1BatchHeader {
                 bootloader: block
                     .bootloader_code_hash
                     .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
                 default_aa: block
                     .default_aa_code_hash
                     .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
             },
             l1_gas_price: block.l1_gas_price as u64,
             l2_fair_gas_price: block.l2_fair_gas_price as u64,
@@ -142,10 +143,7 @@ impl TryInto<BlockMetadata> for StorageBlock {
     fn try_into(self) -> Result<BlockMetadata, Self::Error> {
         Ok(BlockMetadata {
             root_hash: H256::from_slice(
-                &self
-                    .hash
-                    .clone()
-                    .ok_or(StorageBlockConvertError::IncompleteBlock)?,
+                &self.hash.ok_or(StorageBlockConvertError::IncompleteBlock)?,
             ),
             rollup_last_leaf_index: self
                 .rollup_last_leaf_index
@@ -154,25 +152,20 @@ impl TryInto<BlockMetadata> for StorageBlock {
             merkle_root_hash: H256::from_slice(
                 &self
                     .merkle_root_hash
-                    .clone()
                     .ok_or(StorageBlockConvertError::IncompleteBlock)?,
             ),
             initial_writes_compressed: self
                 .compressed_initial_writes
-                .clone()
                 .ok_or(StorageBlockConvertError::IncompleteBlock)?,
             repeated_writes_compressed: self
                 .compressed_repeated_writes
-                .clone()
                 .ok_or(StorageBlockConvertError::IncompleteBlock)?,
             l2_l1_messages_compressed: self
                 .l2_l1_compressed_messages
-                .clone()
                 .ok_or(StorageBlockConvertError::IncompleteBlock)?,
             l2_l1_merkle_root: H256::from_slice(
                 &self
                     .l2_l1_merkle_root
-                    .clone()
                     .ok_or(StorageBlockConvertError::IncompleteBlock)?,
             ),
             aux_data_hash: H256::from_slice(
@@ -275,6 +268,7 @@ pub fn l1_batch_page_item_from_storage(
 /// Returns block_number SQL statement
 pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String {
     match block_number {
+        api::BlockNumber::Number(number) => number.to_string(),
         api::BlockNumber::Earliest => 0.to_string(),
         api::BlockNumber::Pending => {
             "(SELECT (MAX(number) + 1) as number FROM miniblocks)".to_string()
@@ -282,7 +276,6 @@ pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String {
         api::BlockNumber::Latest | api::BlockNumber::Committed => {
             "(SELECT MAX(number) as number FROM miniblocks)".to_string()
         }
-        api::BlockNumber::Number(block_number) => format!("{}", block_number),
        api::BlockNumber::Finalized => "
            (SELECT COALESCE(
                (
@@ -302,23 +295,29 @@ pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String {
    }
 }
 
-pub fn web3_block_where_sql(block_id: BlockId, arg_index: u8) -> String {
+pub fn web3_block_where_sql(block_id: api::BlockId, arg_index: u8) -> String {
     match block_id {
-        BlockId::Hash(_) => format!("miniblocks.hash = ${}", arg_index),
-        BlockId::Number(number) => {
+        api::BlockId::Hash(_) => format!("miniblocks.hash = ${arg_index}"),
+        api::BlockId::Number(api::BlockNumber::Number(_)) => {
+            format!("miniblocks.number = ${arg_index}")
+        }
+        api::BlockId::Number(number) => {
             let block_sql = web3_block_number_to_sql(number);
             format!("miniblocks.number = {}", block_sql)
         }
     }
 }
 
-pub fn bind_block_where_sql_params(
-    block_id: BlockId,
-    query: Query<Postgres, PgArguments>,
-) -> Query<Postgres, PgArguments> {
+pub fn bind_block_where_sql_params<'q>(
+    block_id: &'q api::BlockId,
+    query: Query<'q, Postgres, PgArguments>,
+) -> Query<'q, Postgres, PgArguments> {
     match block_id {
         // these block_id types result in `$1` in the query string, which we have to `bind`
-        BlockId::Hash(block_hash) => query.bind(block_hash.0.to_vec()),
+        api::BlockId::Hash(block_hash) => query.bind(block_hash.as_bytes()),
+        api::BlockId::Number(api::BlockNumber::Number(number)) => {
+            query.bind(number.as_u64() as i64)
+        }
         // others don't introduce `$1`, so we don't have to `bind` anything
         _ => query,
     }
@@ -372,14 +371,14 @@ impl StorageBlockDetails {
             prove_tx_hash: self
                 .prove_tx_hash
                 .as_deref()
-                .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")),
+                .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")),
             proven_at: self
                 .proven_at
                 .map(|proven_at| DateTime::<Utc>::from_utc(proven_at, Utc)),
             execute_tx_hash: self
                 .execute_tx_hash
                 .as_deref()
-                .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")),
+                .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")),
             executed_at: self
                 .executed_at
                 .map(|executed_at| DateTime::<Utc>::from_utc(executed_at, Utc)),
@@ -389,11 +388,11 @@ impl StorageBlockDetails {
                 bootloader: self
                     .bootloader_code_hash
                     .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
                 default_aa: self
                     .default_aa_code_hash
                     .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
             },
             operator_address: self
                 .fee_account_address
@@ -451,14 +450,14 @@ impl From<StorageL1BatchDetails> for L1BatchDetails {
             prove_tx_hash: storage_l1_batch_details
                 .prove_tx_hash
                 .as_deref()
-                .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")),
+                .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")),
             proven_at: storage_l1_batch_details
                 .proven_at
                 .map(|proven_at| DateTime::<Utc>::from_utc(proven_at, Utc)),
             execute_tx_hash: storage_l1_batch_details
                 .execute_tx_hash
                 .as_deref()
-                .map(|hash| H256::from_str(hash).expect("Incorrect verify_tx hash")),
+                .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")),
             executed_at: storage_l1_batch_details
                 .executed_at
                 .map(|executed_at| DateTime::<Utc>::from_utc(executed_at, Utc)),
@@ -468,11 +467,11 @@ impl From<StorageL1BatchDetails> for L1BatchDetails {
                 bootloader: storage_l1_batch_details
                     .bootloader_code_hash
                     .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
                 default_aa: storage_l1_batch_details
                     .default_aa_code_hash
                     .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash))
-                    .expect("Should be not none"),
+                    .expect("should not be none"),
             },
         }
    }
@@ -508,11 +507,11 @@ impl From<StorageMiniblockHeader> for MiniblockHeader {
             bootloader: row
                 .bootloader_code_hash
                 .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash))
-                .expect("Should be not none"),
+                .expect("should not be none"),
             default_aa: row
                 .default_aa_code_hash
                 .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash))
-                .expect("Should be not none"),
+                .expect("should not be none"),
         },
     }
 }
@@ -555,12 +554,6 @@ mod tests {
         );
     }
 
-    #[test]
-    fn test_web3_block_number_to_sql_number() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Number(123.into()));
-        assert_eq!(sql, "123".to_string());
-    }
-
     #[test]
     fn test_web3_block_number_to_sql_finalized() {
         let sql = web3_block_number_to_sql(api::BlockNumber::Finalized);
diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs
index 92c76cce393a..e37d5c7d5376 100644
--- a/core/lib/dal/src/models/storage_eth_tx.rs
+++ b/core/lib/dal/src/models/storage_eth_tx.rs
@@ -2,7 +2,7 @@
 use sqlx::types::chrono::NaiveDateTime;
 use std::str::FromStr;
 use zksync_types::aggregated_operations::AggregatedActionType;
 use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend};
-use zksync_types::{Address, L1BatchNumber, H256};
+use zksync_types::{Address, L1BatchNumber, Nonce, H256};
 
 #[derive(Debug, Clone)]
 pub struct StorageEthTx {
@@ -56,7 +56,7 @@ impl From<StorageEthTx> for EthTx {
     fn from(tx: StorageEthTx) -> EthTx {
         EthTx {
             id: tx.id as u32,
-            nonce: tx.nonce as u64,
+            nonce: Nonce(tx.nonce as u32),
             contract_address: Address::from_str(&tx.contract_address)
                 .expect("Incorrect address in db"),
             raw_tx: tx.raw_tx.clone(),
@@ -95,7 +95,7 @@ impl From<StorageTxHistoryToSend> for TxHistoryToSend {
             signed_raw_tx: history
                 .signed_raw_tx
                 .expect("Should rely only on the new txs"),
-            nonce: history.nonce as u64,
+            nonce: Nonce(history.nonce as u32),
         }
     }
 }
diff --git a/core/lib/dal/src/models/storage_event.rs b/core/lib/dal/src/models/storage_event.rs
index 754c9fa1d6a6..001e4a2547a8 100644
--- a/core/lib/dal/src/models/storage_event.rs
+++ b/core/lib/dal/src/models/storage_event.rs
@@ -80,6 +80,7 @@ impl From<StorageL2ToL1Log> for L2ToL1Log {
             shard_id: (log.shard_id as u32).into(),
             is_service: log.is_service,
             sender: Address::from_slice(&log.sender),
+            tx_index_in_l1_batch: Some(log.tx_index_in_l1_batch.into()),
             key: H256::from_slice(&log.key),
             value: H256::from_slice(&log.value),
         }
diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs
new file mode 100644
index 000000000000..4cec6bdce815
--- /dev/null
+++ b/core/lib/dal/src/models/storage_sync.rs
@@ -0,0 +1,87 @@
+use std::str::FromStr;
+
+use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc};
+
+use zksync_contracts::BaseSystemContractsHashes;
+use zksync_types::api::en::SyncBlock;
+use zksync_types::Transaction;
+use zksync_types::{Address, L1BatchNumber, MiniblockNumber, H256};
+
+#[derive(Debug, Clone, sqlx::FromRow)]
+pub struct StorageSyncBlock {
+    pub number: i64,
+    pub l1_batch_number: i64,
+    pub last_batch_miniblock: Option<i64>,
+    pub timestamp: i64,
+    pub root_hash: Option<Vec<u8>>,
+    pub commit_tx_hash: Option<String>,
+    pub committed_at: Option<NaiveDateTime>,
+    pub prove_tx_hash: Option<String>,
+    pub proven_at: Option<NaiveDateTime>,
+    pub execute_tx_hash: Option<String>,
+    pub executed_at: Option<NaiveDateTime>,
+    // L1 gas price assumed in the corresponding batch
+    pub l1_gas_price: i64,
+    // L2 gas price assumed in the corresponding batch
+    pub l2_fair_gas_price: i64,
+    pub bootloader_code_hash: Option<Vec<u8>>,
+    pub default_aa_code_hash: Option<Vec<u8>>,
+    pub fee_account_address: Option<Vec<u8>>, // May be None if the block is not yet sealed
+}
+
+impl StorageSyncBlock {
+    pub(crate) fn into_sync_block(
+        self,
+        current_operator_address: Address,
+        transactions: Option<Vec<Transaction>>,
+    ) -> SyncBlock {
+        SyncBlock {
+            number: MiniblockNumber(self.number as u32),
+            l1_batch_number: L1BatchNumber(self.l1_batch_number as u32),
+            last_in_batch: self
+                .last_batch_miniblock
+                .map(|n| n == self.number)
+                .unwrap_or(false),
+            timestamp: self.timestamp as u64,
+            root_hash: self.root_hash.as_deref().map(H256::from_slice),
+            commit_tx_hash: self
+                .commit_tx_hash
+                .as_deref()
+                .map(|hash| H256::from_str(hash).expect("Incorrect commit_tx hash")),
+            committed_at: self
+                .committed_at
+                .map(|committed_at| DateTime::<Utc>::from_utc(committed_at, Utc)),
+            prove_tx_hash: self
+                .prove_tx_hash
+                .as_deref()
+                .map(|hash| H256::from_str(hash).expect("Incorrect prove_tx hash")),
+            proven_at: self
+                .proven_at
+                .map(|proven_at| DateTime::<Utc>::from_utc(proven_at, Utc)),
+            execute_tx_hash: self
+                .execute_tx_hash
+                .as_deref()
+                .map(|hash| H256::from_str(hash).expect("Incorrect execute_tx hash")),
+            executed_at: self
+                .executed_at
+                .map(|executed_at| DateTime::<Utc>::from_utc(executed_at, Utc)),
+            l1_gas_price: self.l1_gas_price as u64,
+            l2_fair_gas_price: self.l2_fair_gas_price as u64,
+            base_system_contracts_hashes: BaseSystemContractsHashes {
+                bootloader: self
+                    .bootloader_code_hash
+                    .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash))
+                    .expect("Should not be none"),
+                default_aa: self
+                    .default_aa_code_hash
+                    .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash))
+                    .expect("Should not be none"),
+            },
+            operator_address: self
+                .fee_account_address
+                .map(|fee_account_address| Address::from_slice(&fee_account_address))
+                .unwrap_or(current_operator_address),
+            transactions,
+        }
+    }
+}
diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs
index 0a5e4c5ed617..9cbd0d17fdf3 100644
--- a/core/lib/dal/src/models/storage_transaction.rs
+++ b/core/lib/dal/src/models/storage_transaction.rs
@@ -147,6 +147,9 @@ impl From<StorageTransactionDetails> for api::TransactionDetails {
         let gas_refunded = U256::from(tx_details.refunded_gas as u32);
         let fee = (gas_limit - gas_refunded) * effective_gas_price;
 
+        let gas_per_pubdata =
+            bigdecimal_to_u256(tx_details.gas_per_pubdata_limit.unwrap_or_default());
+
         let initiator_address = H160::from_slice(tx_details.initiator_address.as_slice());
         let received_at = DateTime::<Utc>::from_utc(tx_details.received_at, Utc);
 
@@ -164,6 +167,7 @@ impl From<StorageTransactionDetails> for api::TransactionDetails {
             is_l1_originated: tx_details.is_priority,
             status,
             fee,
+            gas_per_pubdata: Some(gas_per_pubdata),
             initiator_address,
             received_at,
             eth_commit_tx_hash,
@@ -195,14 +199,11 @@ pub fn web3_transaction_select_sql() -> &'static str {
     "#
 }
 
-pub fn extract_web3_transaction(
-    db_row: PgRow,
-    chain_id: L2ChainId,
-) -> zksync_types::api::Transaction {
+pub fn extract_web3_transaction(db_row: PgRow, chain_id: L2ChainId) -> api::Transaction {
     let row_signature: Option<Vec<u8>> = db_row.get("signature");
     let signature = row_signature.and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok());
-    zksync_types::api::Transaction {
+    api::Transaction {
         hash: H256::from_slice(db_row.get("tx_hash")),
         nonce: U256::from(db_row.try_get::<i64, &str>("nonce").ok().unwrap_or(0)),
         block_hash: db_row.try_get("block_hash").ok().map(H256::from_slice),
diff --git a/core/lib/dal/src/models/storage_verification_request.rs b/core/lib/dal/src/models/storage_verification_request.rs
new file mode 100644
index 000000000000..3c0864685380
--- /dev/null
+++ b/core/lib/dal/src/models/storage_verification_request.rs
@@ -0,0 +1,48 @@
+use zksync_types::explorer_api::{
+    CompilerType, CompilerVersions, SourceCodeData, VerificationIncomingRequest,
+    VerificationRequest,
+};
+use zksync_types::Address;
+
+#[derive(Debug, Clone, sqlx::FromRow)]
+pub struct StorageVerificationRequest {
+    pub id: i64,
+    pub contract_address: Vec<u8>,
+    pub source_code: String,
+    pub contract_name: String,
+    pub zk_compiler_version: String,
+    pub compiler_version: String,
+    pub optimization_used: bool,
+    pub optimizer_mode: Option<String>,
+    pub constructor_arguments: Vec<u8>,
+    pub is_system: bool,
+}
+
+impl From<StorageVerificationRequest> for VerificationRequest {
+    fn from(value: StorageVerificationRequest) -> Self {
+        let source_code_data: SourceCodeData = serde_json::from_str(&value.source_code).unwrap();
+        let compiler_versions = match source_code_data.compiler_type() {
+            CompilerType::Solc => CompilerVersions::Solc {
+                compiler_zksolc_version: value.zk_compiler_version,
+                compiler_solc_version: value.compiler_version,
+            },
+            CompilerType::Vyper => CompilerVersions::Vyper {
+                compiler_zkvyper_version: value.zk_compiler_version,
+                compiler_vyper_version: value.compiler_version,
+            },
+        };
+        VerificationRequest {
+            id: value.id as usize,
+            req: VerificationIncomingRequest {
+                contract_address: Address::from_slice(&value.contract_address),
+                source_code_data,
+                contract_name: value.contract_name,
+                compiler_versions,
+                optimization_used: value.optimization_used,
+                optimizer_mode: value.optimizer_mode,
+                constructor_arguments: value.constructor_arguments.into(),
+                is_system: value.is_system,
+            },
+        }
+    }
+}
diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs
index fa2414653268..71618db0881c 100644
--- a/core/lib/dal/src/prover_dal.rs
+++ b/core/lib/dal/src/prover_dal.rs
@@ -17,12 +17,12 @@
 use crate::StorageProcessor;
 
 #[derive(Debug)]
 pub struct ProverDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }
 
 impl ProverDal<'_, '_> {
-    pub fn get_next_prover_job(&mut self) -> Option<ProverJobMetadata> {
-        async_std::task::block_on(async {
+    pub async fn get_next_prover_job(&mut self) -> Option<ProverJobMetadata> {
+        {
             let result: Option<ProverJobMetadata> = sqlx::query!(
                 "
                 UPDATE prover_jobs
@@ -51,11 +51,11 @@ impl ProverDal<'_, '_> {
                 sequence_number: row.sequence_number as usize,
             });
             result
-        })
+        }
     }
 
-    pub fn get_proven_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> {
-        async_std::task::block_on(async {
+    pub async fn get_proven_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> {
+        {
             sqlx::query!(
                 r#"SELECT MAX(l1_batch_number) as "l1_batch_number!", aggregation_round FROM prover_jobs 
                    WHERE status='successful'
@@ -73,14 +73,14 @@ impl ProverDal<'_, '_> {
                     )
                 })
                 .collect()
-        })
+        }
     }
 
-    pub fn get_next_prover_job_by_circuit_types(
+    pub async fn get_next_prover_job_by_circuit_types(
         &mut self,
         circuit_types: Vec<String>,
     ) -> Option<ProverJobMetadata> {
-        async_std::task::block_on(async {
+        {
             let result: Option<ProverJobMetadata> = sqlx::query!(
                 "
                 UPDATE prover_jobs
@@ -112,17 +112,17 @@ impl ProverDal<'_, '_> {
             });
 
             result
-        })
+        }
    }
 
     // If making changes to this method, consider moving the serialization logic to the DAL layer.
-    pub fn insert_prover_jobs(
+    pub async fn insert_prover_jobs(
         &mut self,
         l1_batch_number: L1BatchNumber,
         circuit_types_and_urls: Vec<(&'static str, String)>,
         aggregation_round: AggregationRound,
     ) {
-        async_std::task::block_on(async {
+        {
             let started_at = Instant::now();
             let it = circuit_types_and_urls.into_iter().enumerate();
             for (sequence_number, (circuit, circuit_input_blob_url)) in it {
@@ -145,17 +145,17 @@ impl ProverDal<'_, '_> {
 
                 metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_witness");
             }
-        })
+        }
     }
 
-    pub fn save_proof(
+    pub async fn save_proof(
         &mut self,
         id: u32,
         time_taken: Duration,
         proof: Vec<u8>,
         processed_by: &str,
     ) {
-        async_std::task::block_on(async {
+        {
             let started_at = Instant::now();
             sqlx::query!(
                 "
@@ -173,11 +173,11 @@ impl ProverDal<'_, '_> {
                 .unwrap();
 
             metrics::histogram!("dal.request", started_at.elapsed(), "method" => "save_proof");
-        })
+        }
     }
 
-    pub fn save_proof_error(&mut self, id: u32, error: String, max_attempts: u32) {
-        async_std::task::block_on(async {
+    pub async fn save_proof_error(&mut self, id: u32, error: String, max_attempts: u32) {
+        {
             let mut transaction = self.storage.start_transaction().await;
 
             let row = sqlx::query!(
@@ -197,20 +197,21 @@ impl ProverDal<'_, '_> {
             if row.attempts as u32 >= max_attempts {
                 transaction
                     .blocks_dal()
-                    .set_skip_proof_for_l1_batch(L1BatchNumber(row.l1_batch_number as u32));
+                    .set_skip_proof_for_l1_batch(L1BatchNumber(row.l1_batch_number as u32))
+                    .await;
             }
 
             transaction.commit().await;
-        })
+        }
     }
 
-    pub fn requeue_stuck_jobs(
+    pub async fn requeue_stuck_jobs(
         &mut self,
         processing_timeout: Duration,
         max_attempts: u32,
     ) -> Vec<StuckProverJobs> {
         let processing_timeout = pg_interval_from_duration(processing_timeout);
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "
                 UPDATE prover_jobs
@@ -229,17 +230,17 @@ impl ProverDal<'_, '_> {
                 .into_iter()
                .map(|row| StuckProverJobs{id: row.id as u64, status: row.status, attempts: row.attempts as u64})
                .collect()
-        })
+        }
    }
 
     // For each block in the provided range it returns a tuple:
     // (aggregation_coords; scheduler_proof)
-    pub fn get_final_proofs_for_blocks(
+    pub async fn get_final_proofs_for_blocks(
         &mut self,
         from_block: L1BatchNumber,
         to_block: L1BatchNumber,
     ) -> Vec<BlockProofForL1> {
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords
                  FROM prover_jobs
@@ -271,11 +272,13 @@ impl ProverDal<'_, '_> {
                     }
                 })
                 .collect()
-        })
+        }
     }
 
-    pub fn get_prover_jobs_stats_per_circuit(&mut self) -> HashMap<String, JobCountStatistics> {
-        async_std::task::block_on(async {
+    pub async fn get_prover_jobs_stats_per_circuit(
+        &mut self,
+    ) -> HashMap<String, JobCountStatistics> {
+        {
             sqlx::query!(
                 r#"
                 SELECT COUNT(*) as "count!", circuit_type as "circuit_type!", status as "status!"
@@ -304,11 +307,11 @@ impl ProverDal<'_, '_> {
                     }
                     acc
                 })
-        })
+        }
     }
 
-    pub fn get_prover_jobs_stats(&mut self) -> JobCountStatistics {
-        async_std::task::block_on(async {
+    pub async fn get_prover_jobs_stats(&mut self) -> JobCountStatistics {
+        {
             let mut results: HashMap<String, usize> = sqlx::query!(
                 r#"
                 SELECT COUNT(*) as "count!", status as "status!"
@@ -328,11 +331,11 @@ impl ProverDal<'_, '_> {
                 failed: results.remove("failed").unwrap_or(0usize),
                 successful: results.remove("successful").unwrap_or(0usize),
             }
-        })
+        }
     }
 
-    pub fn min_unproved_l1_batch_number(&mut self) -> Option<L1BatchNumber> {
-        async_std::task::block_on(async {
+    pub async fn min_unproved_l1_batch_number(&mut self) -> Option<L1BatchNumber> {
+        {
             sqlx::query!(
                 r#"
                 SELECT MIN(l1_batch_number) as "l1_batch_number?" FROM (
@@ -349,13 +352,13 @@ impl ProverDal<'_, '_> {
                 .unwrap()
                 .l1_batch_number
                 .map(|n| L1BatchNumber(n as u32))
-        })
+        }
     }
 
-    pub fn min_unproved_l1_batch_number_by_basic_circuit_type(
+    pub async fn min_unproved_l1_batch_number_by_basic_circuit_type(
         &mut self,
     ) -> Vec<(String, L1BatchNumber)> {
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 r#"
                 SELECT MIN(l1_batch_number) as "l1_batch_number!", circuit_type
@@ -372,11 +375,11 @@ impl ProverDal<'_, '_> {
                 .into_iter()
                 .map(|row| (row.circuit_type, L1BatchNumber(row.l1_batch_number as u32)))
                 .collect()
-        })
+        }
     }
 
-    pub fn get_extended_stats(&mut self) -> anyhow::Result<JobExtendedStatistics> {
-        async_std::task::block_on(async {
+    pub async fn get_extended_stats(&mut self) -> anyhow::Result<JobExtendedStatistics> {
+        {
             let limits = sqlx::query!(
                 r#"
                 SELECT
@@ -398,10 +401,12 @@ impl ProverDal<'_, '_> {
             .fetch_one(self.storage.conn())
             .await?;
 
-            let active_area = self.get_jobs(GetProverJobsParams::blocks(
-                L1BatchNumber(limits.successful_limit as u32)
-                    ..L1BatchNumber(limits.queued_limit as u32),
-            ))?;
+            let active_area = self
+                .get_jobs(GetProverJobsParams::blocks(
+                    L1BatchNumber(limits.successful_limit as u32)
+                        ..L1BatchNumber(limits.queued_limit as u32),
+                ))
+                .await?;
 
             Ok(JobExtendedStatistics {
                 successful_padding: L1BatchNumber(limits.successful_limit as u32 - 1),
@@ -409,10 +414,10 @@ impl ProverDal<'_, '_> {
                 queued_padding_len: (limits.max_block - limits.queued_limit) as u32,
                 active_area,
             })
-        })
+        }
     }
 
-    pub fn get_jobs(
+    pub async fn get_jobs(
         &mut self,
         opts: GetProverJobsParams,
     ) -> Result<Vec<ProverJobInfo>, sqlx::Error> {
@@ -492,16 +497,16 @@ impl ProverDal<'_, '_> {
 
         let query = sqlx::query_as(&sql);
 
-        Ok(
-            async_std::task::block_on(async move { query.fetch_all(self.storage.conn()).await })?
-                .into_iter()
-                .map(|x: StorageProverJobInfo| x.into())
-                .collect::<Vec<_>>(),
-        )
+        Ok(query
+            .fetch_all(self.storage.conn())
+            .await?
+            .into_iter()
+            .map(|x: StorageProverJobInfo| x.into())
+            .collect::<Vec<_>>())
    }
 
-    pub fn get_prover_job_by_id(&mut self, job_id: u32) -> Option<ProverJobMetadata> {
-        async_std::task::block_on(async {
+    pub async fn get_prover_job_by_id(&mut self, job_id: u32) -> Option<ProverJobMetadata> {
+        {
             let result: Option<ProverJobMetadata> =
                 sqlx::query!("SELECT * from prover_jobs where id=$1", job_id as i64)
                     .fetch_optional(self.storage.conn())
@@ -516,11 +521,14 @@ impl ProverDal<'_, '_> {
                 sequence_number: row.sequence_number as usize,
             });
             result
-        })
+        }
     }
 
-    pub fn get_circuit_input_blob_urls_to_be_cleaned(&mut self, limit: u8) -> Vec<(i64, String)> {
-        async_std::task::block_on(async {
+    pub async fn get_circuit_input_blob_urls_to_be_cleaned(
+        &mut self,
+        limit: u8,
+    ) -> Vec<(i64, String)> {
+        {
             let job_ids = sqlx::query!(
                 r#"
                 SELECT id, circuit_input_blob_url FROM prover_jobs
@@ -538,11 +546,11 @@ impl ProverDal<'_, '_> {
                 .into_iter()
                 .map(|row| (row.id, row.circuit_input_blob_url.unwrap()))
                 .collect()
-        })
+        }
     }
 
-    pub fn mark_gcs_blobs_as_cleaned(&mut self, ids: Vec<i64>) {
-        async_std::task::block_on(async {
+    pub async fn mark_gcs_blobs_as_cleaned(&mut self, ids: Vec<i64>) {
+        {
             sqlx::query!(
                 r#"
                 UPDATE prover_jobs
@@ -554,11 +562,11 @@ impl ProverDal<'_, '_> {
             .execute(self.storage.conn())
             .await
             .unwrap();
-        })
+        }
     }
 
-    pub fn update_status(&mut self, id: u32, status: &str) {
-        async_std::task::block_on(async {
+    pub async fn update_status(&mut self, id: u32, status: &str) {
+        {
             sqlx::query!(
                 r#"
                 UPDATE prover_jobs
@@ -571,7 +579,7 @@ impl ProverDal<'_, '_> {
             .execute(self.storage.conn())
             .await
             .unwrap();
-        })
+        }
     }
 }
diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs
index 2414abc8f515..581f070661ca 100644
--- a/core/lib/dal/src/storage_dal.rs
+++ b/core/lib/dal/src/storage_dal.rs
@@ -1,250 +1,239 @@
-use crate::models::storage_contract::StorageContractSource;
+use itertools::Itertools;
+
+use std::{
+    collections::{HashMap, HashSet},
+    time::Instant,
+};
+
 use crate::StorageProcessor;
-use std::collections::{HashMap, HashSet};
-use std::time::Instant;
 use zksync_contracts::{BaseSystemContracts, SystemContractCode};
-use zksync_types::{
-    vm_trace::ContractSourceDebugInfo, Address, MiniblockNumber, StorageKey, StorageLog,
-    StorageValue, H256, U256,
-};
+use zksync_types::{MiniblockNumber, StorageKey, StorageLog, StorageValue, H256, U256};
 use zksync_utils::{bytes_to_be_words, bytes_to_chunks};
 
 #[derive(Debug)]
 pub struct StorageDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }
 
 impl StorageDal<'_, '_> {
-    pub fn insert_factory_deps(
+    /// Inserts factory dependencies for a miniblock. Factory deps are specified as a map of
+    /// `(bytecode_hash, bytecode)` entries.
+    pub async fn insert_factory_deps(
         &mut self,
         block_number: MiniblockNumber,
-        factory_deps: HashMap<H256, Vec<u8>>,
+        factory_deps: &HashMap<H256, Vec<u8>>,
     ) {
-        async_std::task::block_on(async {
-            let (bytecode_hashes, bytecodes): (Vec<_>, Vec<_>) = factory_deps
-                .into_iter()
-                .map(|dep| (dep.0.as_bytes().into(), dep.1))
-                .unzip();
-
-            // Copy from stdin can't be used here because of 'ON CONFLICT'.
-            sqlx::query!(
-                "INSERT INTO factory_deps
-                    (bytecode_hash, bytecode, miniblock_number, created_at, updated_at)
-                SELECT u.bytecode_hash, u.bytecode, $3, now(), now()
-                    FROM UNNEST($1::bytea[], $2::bytea[])
-                AS u(bytecode_hash, bytecode)
-                ON CONFLICT (bytecode_hash) DO NOTHING
-                ",
-                &bytecode_hashes,
-                &bytecodes,
-                block_number.0 as i64,
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+        let (bytecode_hashes, bytecodes): (Vec<_>, Vec<_>) = factory_deps
+            .iter()
+            .map(|dep| (dep.0.as_bytes(), dep.1.as_slice()))
+            .unzip();
+
+        // Copy from stdin can't be used here because of 'ON CONFLICT'.
+        sqlx::query!(
+            "INSERT INTO factory_deps \
+                (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) \
+            SELECT u.bytecode_hash, u.bytecode, $3, now(), now() \
+                FROM UNNEST($1::bytea[], $2::bytea[]) \
+            AS u(bytecode_hash, bytecode) \
+            ON CONFLICT (bytecode_hash) DO NOTHING",
+            &bytecode_hashes as &[&[u8]],
+            &bytecodes as &[&[u8]],
+            block_number.0 as i64,
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
     }
 
-    pub fn get_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1",
-                &hash.0.to_vec(),
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap()
-            .map(|row| row.bytecode)
-        })
+    /// Returns bytecode for a factory dep with the specified bytecode `hash`.
+    pub async fn get_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        sqlx::query!(
+            "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1",
+            hash.as_bytes(),
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| row.bytecode)
     }
 
-    pub fn get_base_system_contracts(
+    pub async fn get_base_system_contracts(
         &mut self,
         bootloader_hash: H256,
         default_aa_hash: H256,
     ) -> BaseSystemContracts {
-        async_std::task::block_on(async {
-            let bootloader_bytecode = self
-                .get_factory_dep(bootloader_hash)
-                .expect("Bootloader code should be present in the database");
-            let bootloader_code = SystemContractCode {
-                code: bytes_to_be_words(bootloader_bytecode),
-                hash: bootloader_hash,
-            };
-
-            let default_aa_bytecode = self
-                .get_factory_dep(default_aa_hash)
-                .expect("Default account code should be present in the database");
-
-            let default_aa_code = SystemContractCode {
-                code: bytes_to_be_words(default_aa_bytecode),
-                hash: default_aa_hash,
-            };
-            BaseSystemContracts {
-                bootloader: bootloader_code,
-                default_aa: default_aa_code,
-            }
-        })
-    }
-
-    pub fn get_factory_deps(&mut self, hashes: &HashSet<H256>) -> HashMap<U256, Vec<[u8; 32]>> {
-        let hashes_as_vec_u8: Vec<Vec<u8>> = hashes.iter().map(|hash| hash.0.to_vec()).collect();
-
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)",
-                &hashes_as_vec_u8,
-            )
-            .fetch_all(self.storage.conn())
+        let bootloader_bytecode = self
+            .get_factory_dep(bootloader_hash)
             .await
-            .unwrap()
-            .into_iter()
-            .map(|row| {
-                (
-                    U256::from_big_endian(&row.bytecode_hash),
-                    bytes_to_chunks(&row.bytecode),
-                )
-            })
-            .collect()
-        })
-    }
-
-    pub fn get_factory_deps_for_revert(&mut self, block_number: MiniblockNumber) -> Vec<H256> {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT bytecode_hash FROM factory_deps WHERE miniblock_number > $1",
-                block_number.0 as i64
-            )
-            .fetch_all(self.storage.conn())
+            .expect("Bootloader code should be present in the database");
+        let bootloader_code = SystemContractCode {
+            code: bytes_to_be_words(bootloader_bytecode),
+            hash: bootloader_hash,
+        };
+
+        let default_aa_bytecode = self
+            .get_factory_dep(default_aa_hash)
             .await
-            .unwrap()
-            .into_iter()
-            .map(|row| H256::from_slice(&row.bytecode_hash))
-            .collect()
-        })
+            .expect("Default account code should be present in the database");
+
+        let default_aa_code = SystemContractCode {
+            code: bytes_to_be_words(default_aa_bytecode),
+            hash: default_aa_hash,
+        };
+        BaseSystemContracts {
+            bootloader: bootloader_code,
+            default_aa: default_aa_code,
+        }
     }
 
-    pub fn set_contract_source(&mut self, address: Address, source: ContractSourceDebugInfo) {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "INSERT INTO contract_sources (address, assembly_code, pc_line_mapping, created_at, updated_at)
-                VALUES ($1, $2, $3, now(), now())
-                ON CONFLICT (address)
-                DO UPDATE SET assembly_code = $2, pc_line_mapping = $3, updated_at = now()
-                ",
-                address.as_bytes(),
-                source.assembly_code,
-                serde_json::to_value(source.pc_line_mapping).unwrap()
+    /// Returns bytecodes for factory deps with the specified `hashes`.
+    pub async fn get_factory_deps(
+        &mut self,
+        hashes: &HashSet<H256>,
+    ) -> HashMap<U256, Vec<[u8; 32]>> {
+        let hashes_as_bytes: Vec<_> = hashes.iter().map(H256::as_bytes).collect();
+
+        sqlx::query!(
+            "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)",
+            &hashes_as_bytes as &[&[u8]],
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| {
+            (
+                U256::from_big_endian(&row.bytecode_hash),
+                bytes_to_chunks(&row.bytecode),
             )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
         })
+        .collect()
     }
 
-    pub fn get_contract_source(&mut self, address: Address) -> Option<ContractSourceDebugInfo> {
-        async_std::task::block_on(async {
-            let source = sqlx::query_as!(
-                StorageContractSource,
-                "SELECT assembly_code, pc_line_mapping FROM contract_sources WHERE address = $1",
-                address.as_bytes()
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap();
-            source.map(Into::into)
-        })
+    /// Returns bytecode hashes for factory deps from miniblocks with number strictly greater
+    /// than `block_number`.
+    pub async fn get_factory_deps_for_revert(
+        &mut self,
+        block_number: MiniblockNumber,
+    ) -> Vec<H256> {
+        sqlx::query!(
+            "SELECT bytecode_hash FROM factory_deps WHERE miniblock_number > $1",
+            block_number.0 as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| H256::from_slice(&row.bytecode_hash))
+        .collect()
     }
 
-    // we likely don't need `storage` table at all, as we have `storage_logs` table
-    // Returns the list of unique storage updates for block
-    pub fn apply_storage_logs(
+    /// Applies the specified storage logs for a miniblock. Returns the map of unique storage updates.
+    // We likely don't need `storage` table at all, as we have `storage_logs` table
+    pub async fn apply_storage_logs(
         &mut self,
         updates: &[(H256, Vec<StorageLog>)],
-    ) -> Vec<(StorageKey, (H256, StorageValue))> {
-        async_std::task::block_on(async {
-            let mut unique_updates: HashMap<StorageKey, (H256, StorageValue)> = HashMap::new();
-            for (tx_hash, storage_logs) in updates {
-                for storage_log in storage_logs {
-                    unique_updates.insert(storage_log.key, (*tx_hash, storage_log.value));
-                }
-            }
-            let unique_updates: Vec<(StorageKey, (H256, StorageValue))> =
-                unique_updates.into_iter().collect();
-
-            let hashed_keys: Vec<Vec<u8>> = unique_updates
-                .iter()
-                .map(|(key, _)| key.hashed_key().0.to_vec())
-                .collect();
-
-            let addresses: Vec<_> = unique_updates
-                .iter()
-                .map(|(key, _)| key.address().0.to_vec())
-                .collect();
-            let keys: Vec<_> = unique_updates
-                .iter()
-                .map(|(key, _)| key.key().0.to_vec())
-                .collect();
-            let values: Vec<Vec<u8>> = unique_updates
-                .iter()
-                .map(|(_, (_, value))| value.as_bytes().to_vec())
-                .collect();
-
-            let tx_hashes: Vec<Vec<u8>> = unique_updates
-                .iter()
-                .map(|(_, (tx_hash, _))| tx_hash.0.to_vec())
-                .collect();
-
-            // Copy from stdin can't be used here because of 'ON CONFLICT'.
-            sqlx::query!(
-                "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at)
-                SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now()
-                FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[])
-                AS u(hashed_key, address, key, value, tx_hash)
-                ON CONFLICT (hashed_key)
-                DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()
-                ",
-                &hashed_keys,
-                &addresses,
-                &keys,
-                &values,
-                &tx_hashes,
+    ) -> HashMap<StorageKey, (H256, StorageValue)> {
+        let unique_updates: HashMap<_, _> = updates
+            .iter()
+            .flat_map(|(tx_hash, storage_logs)| {
+                storage_logs
+                    .iter()
+                    .map(move |log| (log.key, (*tx_hash, log.value)))
+            })
+            .collect();
+
+        let query_parts = unique_updates.iter().map(|(key, (tx_hash, value))| {
+            (
+                key.hashed_key().0.to_vec(),
+                key.address().0.as_slice(),
+                key.key().0.as_slice(),
+                value.as_bytes(),
+                tx_hash.0.as_slice(),
             )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-
-            unique_updates
-        })
+        });
+        let (hashed_keys, addresses, keys, values, tx_hashes): (
+            Vec<_>,
+            Vec<_>,
+            Vec<_>,
+            Vec<_>,
+            Vec<_>,
+        ) = query_parts.multiunzip();
+
+        // Copy from stdin can't be used here because of 'ON CONFLICT'.
+        sqlx::query!(
+            "INSERT INTO storage (hashed_key, address, key, value, tx_hash, created_at, updated_at) \
+            SELECT u.hashed_key, u.address, u.key, u.value, u.tx_hash, now(), now() \
+            FROM UNNEST ($1::bytea[], $2::bytea[], $3::bytea[], $4::bytea[], $5::bytea[]) \
+            AS u(hashed_key, address, key, value, tx_hash) \
+            ON CONFLICT (hashed_key) \
+            DO UPDATE SET tx_hash = excluded.tx_hash, value = excluded.value, updated_at = now()",
+            &hashed_keys,
+            &addresses as &[&[u8]],
+            &keys as &[&[u8]],
+            &values as &[&[u8]],
+            &tx_hashes as &[&[u8]],
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+
+        unique_updates
     }
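`multiunzip` used above comes from `itertools` (available since version 0.10.4); it splits an iterator of n-tuples into n collections in a single pass. A self-contained sketch with illustrative data:

use itertools::Itertools;

fn main() {
    let rows = vec![(1_u32, "a", true), (2, "b", false)];
    // One pass over `rows` yields one Vec per tuple position, mirroring how
    // `query_parts` above is split into `hashed_keys`, `addresses`, etc.
    let (ids, names, flags): (Vec<u32>, Vec<&str>, Vec<bool>) = rows.into_iter().multiunzip();
    assert_eq!(ids, [1, 2]);
    assert_eq!(names, ["a", "b"]);
    assert_eq!(flags, [true, false]);
}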
-    pub fn get_by_key(&mut self, key: &StorageKey) -> Option<StorageValue> {
-        async_std::task::block_on(async {
-            let started_at = Instant::now();
-
-            let result = sqlx::query!(
-                "SELECT value FROM storage WHERE hashed_key = $1",
-                &key.hashed_key().0.to_vec()
-            )
-            .fetch_optional(self.storage.conn())
-            .await
-            .unwrap()
-            .map(|row| H256::from_slice(&row.value));
-            metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_by_key");
+    /// Gets the current storage value at the specified `key`.
+    pub async fn get_by_key(&mut self, key: &StorageKey) -> Option<H256> {
+        let started_at = Instant::now();
+        let hashed_key = key.hashed_key();
+        let result = sqlx::query!(
+            "SELECT value FROM storage WHERE hashed_key = $1",
+            hashed_key.as_bytes()
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| H256::from_slice(&row.value));
+
+        metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_by_key");
+        result
+    }

-            result
-        })
+    /// Removes all factory deps with a miniblock number strictly greater than the specified `block_number`.
+    pub async fn rollback_factory_deps(&mut self, block_number: MiniblockNumber) {
+        sqlx::query!(
+            "DELETE FROM factory_deps WHERE miniblock_number > $1",
+            block_number.0 as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
     }
+}

-    pub fn rollback_factory_deps(&mut self, block_number: MiniblockNumber) {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "DELETE FROM factory_deps WHERE miniblock_number > $1",
-                block_number.0 as i64
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::ConnectionPool;
+    use db_test_macro::db_test;
+    use zksync_types::{AccountTreeId, Address};
+
+    #[db_test(dal_crate)]
+    async fn applying_storage_logs(pool: ConnectionPool) {
+        let mut conn = pool.access_storage().await;
+
+        let account = AccountTreeId::new(Address::repeat_byte(1));
+        let first_key = StorageKey::new(account, H256::zero());
+        let second_key = StorageKey::new(account, H256::from_low_u64_be(1));
+        let storage_logs = vec![
+            StorageLog::new_write_log(first_key, H256::repeat_byte(1)),
+            StorageLog::new_write_log(second_key, H256::repeat_byte(2)),
+        ];
+        let updates = [(H256::repeat_byte(1), storage_logs)];
+        conn.storage_dal().apply_storage_logs(&updates).await;
+
+        let first_value = conn.storage_dal().get_by_key(&first_key).await.unwrap();
+        assert_eq!(first_value, H256::repeat_byte(1));
+        let second_value = conn.storage_dal().get_by_key(&second_key).await.unwrap();
+        assert_eq!(second_value, H256::repeat_byte(2));
     }
 }
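With `storage_dal` now async end to end, a caller acquires a connection and awaits the methods directly, as the test above does. A hypothetical caller sketch (only `access_storage`, `storage_dal`, and `get_factory_dep` are taken from this patch; the wrapper function is illustrative):

async fn print_factory_dep_size(pool: &ConnectionPool, hash: H256) {
    // Acquire a pooled connection, then await the DAL method directly.
    let mut conn = pool.access_storage().await;
    if let Some(bytecode) = conn.storage_dal().get_factory_dep(hash).await {
        println!("factory dep {hash:?} is {} bytes long", bytecode.len());
    }
}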
diff --git a/core/lib/dal/src/storage_load_dal.rs b/core/lib/dal/src/storage_load_dal.rs
deleted file mode 100644
index 8f9a7a75a4c8..000000000000
--- a/core/lib/dal/src/storage_load_dal.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-use crate::StorageProcessor;
-use std::time::Instant;
-use zksync_state::secondary_storage::SecondaryStateStorage;
-use zksync_storage::RocksDB;
-use zksync_types::{
-    AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, ACCOUNT_CODE_STORAGE_ADDRESS,
-    FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256,
-};
-
-#[derive(Debug)]
-pub struct StorageLoadDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
-}
-
-impl StorageLoadDal<'_, '_> {
-    pub fn load_secondary_storage(&mut self, db: RocksDB) -> SecondaryStateStorage {
-        async_std::task::block_on(async {
-            let stage_started_at: Instant = Instant::now();
-            let latest_l1_batch_number = self.storage.blocks_dal().get_sealed_block_number();
-            vlog::debug!(
-                "loading storage for l1 batch number {}",
-                latest_l1_batch_number.0
-            );
-
-            let mut result = SecondaryStateStorage::new(db);
-            let mut current_l1_batch_number = result.get_l1_batch_number().0;
-
-            assert!(
-                current_l1_batch_number <= latest_l1_batch_number.0 + 1,
-                "L1 batch number in state keeper cache is greater than last sealed L1 batch number in Postgres"
-            );
-            while current_l1_batch_number <= latest_l1_batch_number.0 {
-                let (from_miniblock_number, to_miniblock_number) = self
-                    .storage
-                    .blocks_dal()
-                    .get_miniblock_range_of_l1_batch(L1BatchNumber(current_l1_batch_number))
-                    .expect("L1 batch should contain at least one miniblock");
-
-                vlog::debug!(
-                    "loading state changes for l1 batch {}",
-                    current_l1_batch_number
-                );
-                let storage_logs: Vec<_> = sqlx::query!(
-                    "
-                        SELECT address, key, value FROM storage_logs
-                        WHERE miniblock_number >= $1 AND miniblock_number <= $2
-                        ORDER BY miniblock_number, operation_number ASC
-                    ",
-                    from_miniblock_number.0 as i64,
-                    to_miniblock_number.0 as i64,
-                )
-                .fetch_all(self.storage.conn())
-                .await
-                .unwrap()
-                .into_iter()
-                .map(|row| {
-                    StorageLog::new_write_log(
-                        StorageKey::new(
-                            AccountTreeId::new(Address::from_slice(&row.address)),
-                            H256::from_slice(&row.key),
-                        ),
-                        H256::from_slice(&row.value),
-                    )
-                })
-                .collect();
-                result.process_transaction_logs(&storage_logs);
-
-                vlog::debug!(
-                    "loading deployed contracts for l1 batch {}",
-                    current_l1_batch_number
-                );
-
-                vlog::debug!(
-                    "loading factory deps for l1 batch {}",
-                    current_l1_batch_number
-                );
-                sqlx::query!(
-                    "SELECT bytecode_hash, bytecode FROM factory_deps
-                    WHERE miniblock_number >= $1 AND miniblock_number <= $2",
-                    from_miniblock_number.0 as i64,
-                    to_miniblock_number.0 as i64
-                )
-                .fetch_all(self.storage.conn())
-                .await
-                .unwrap()
-                .into_iter()
-                .for_each(|row| {
-                    result.store_factory_dep(H256::from_slice(&row.bytecode_hash), row.bytecode)
-                });
-
-                current_l1_batch_number += 1;
-                result.save(L1BatchNumber(current_l1_batch_number));
-            }
-
-            metrics::histogram!(
-                "server.state_keeper.update_secondary_storage",
-                stage_started_at.elapsed()
-            );
-            result
-        })
-    }
-
-    pub fn load_number_of_contracts(&mut self) -> u64 {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "SELECT count(*)
-                FROM storage
-                WHERE
-                    address = $1 AND
-                    value != $2
-                ",
-                ACCOUNT_CODE_STORAGE_ADDRESS.as_bytes(),
-                FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(),
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap()
-            .count
-            .unwrap() as u64
-        })
-    }
-}
diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs
index 8bcf159a6494..c8d684ea88b0 100644
--- a/core/lib/dal/src/storage_logs_dal.rs
+++ b/core/lib/dal/src/storage_logs_dal.rs
@@ -1,6 +1,8 @@
-use crate::StorageProcessor;
 use sqlx::types::chrono::Utc;
-use std::collections::HashMap;
+
+use std::{collections::HashMap, time::Instant};
+
+use crate::StorageProcessor;
 use zksync_types::{
     get_code_key, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog,
     FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256,
@@ -8,343 +10,629 @@ use zksync_types::{

 #[derive(Debug)]
 pub struct StorageLogsDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }

 impl StorageLogsDal<'_, '_> {
-    pub fn insert_storage_logs(
+    /// Inserts storage logs grouped by transaction for a miniblock. The ordering of transactions
+    /// must be the same as their ordering in the miniblock.
+    pub async fn insert_storage_logs(
         &mut self,
         block_number: MiniblockNumber,
         logs: &[(H256, Vec<StorageLog>)],
     ) {
-        async_std::task::block_on(async {
-            let mut copy = self
+        self.insert_storage_logs_inner(block_number, logs, 0).await;
+    }
+
+    async fn insert_storage_logs_inner(
+        &mut self,
+        block_number: MiniblockNumber,
+        logs: &[(H256, Vec<StorageLog>)],
+        mut operation_number: u32,
+    ) {
+        let mut copy = self
             .storage
             .conn()
             .copy_in_raw(
-                "COPY storage_logs (hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, created_at, updated_at)
+                "COPY storage_logs(
+                    hashed_key, address, key, value, operation_number, tx_hash, miniblock_number,
+                    created_at, updated_at
+                )
                 FROM STDIN WITH (DELIMITER '|')",
             )
             .await
             .unwrap();

-            let mut bytes: Vec<u8> = Vec::new();
-            let now = Utc::now().naive_utc().to_string();
-            let mut operation_number = 0u32;
-            for (tx_hash, logs) in logs {
-                let tx_hash_str = format!("\\\\x{}", hex::encode(tx_hash.0));
-                for log in logs {
-                    let hashed_key_str = format!("\\\\x{}", hex::encode(log.key.hashed_key().0));
-                    let address_str = format!("\\\\x{}", hex::encode(log.key.address().0));
-                    let key_str = format!("\\\\x{}", hex::encode(log.key.key().0));
-                    let value_str = format!("\\\\x{}", hex::encode(log.value.0));
-                    let row = format!(
-                        "{}|{}|{}|{}|{}|{}|{}|{}|{}\n",
-                        hashed_key_str,
-                        address_str,
-                        key_str,
-                        value_str,
-                        operation_number,
-                        tx_hash_str,
-                        block_number,
-                        now,
-                        now
-                    );
-                    bytes.extend_from_slice(row.as_bytes());
-
-                    operation_number += 1;
-                }
+        let mut buffer = String::new();
+        let now = Utc::now().naive_utc().to_string();
+        for (tx_hash, logs) in logs {
+            for log in logs {
+                write_str!(
+                    &mut buffer,
+                    r"\\x{hashed_key:x}|\\x{address:x}|\\x{key:x}|\\x{value:x}|",
+                    hashed_key = log.key.hashed_key(),
+                    address = log.key.address(),
+                    key = log.key.key(),
+                    value = log.value
+                );
+                writeln_str!(
+                    &mut buffer,
+                    r"{operation_number}|\\x{tx_hash:x}|{block_number}|{now}|{now}"
+                );
+
+                operation_number += 1;
             }
-            copy.send(bytes).await.unwrap();
-            copy.finish().await.unwrap();
-        })
+        }
+        copy.send(buffer.as_bytes()).await.unwrap();
+        copy.finish().await.unwrap();
     }

-    pub fn append_storage_logs(
+    pub async fn append_storage_logs(
         &mut self,
         block_number: MiniblockNumber,
         logs: &[(H256, Vec<StorageLog>)],
     ) {
-        async_std::task::block_on(async {
-            let mut operation_number = sqlx::query!(
-                r#"SELECT COUNT(*) as "count!"
-                FROM storage_logs WHERE miniblock_number = $1"#,
-                block_number.0 as i64
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap()
-            .count as u32;
-
-            let mut copy = self
-                .storage
-                .conn()
-                .copy_in_raw(
-                    "COPY storage_logs (hashed_key, address, key, value, operation_number, tx_hash, miniblock_number, created_at, updated_at)
-                    FROM STDIN WITH (DELIMITER '|')",
-                )
-                .await
-                .unwrap();
-
-            let mut bytes: Vec<u8> = Vec::new();
-            let now = Utc::now().naive_utc().to_string();
-            for (tx_hash, logs) in logs {
-                let tx_hash_str = format!("\\\\x{}", hex::encode(tx_hash.0));
-                for log in logs {
-                    let hashed_key_str = format!("\\\\x{}", hex::encode(log.key.hashed_key().0));
-                    let address_str = format!("\\\\x{}", hex::encode(log.key.address().0));
-                    let key_str = format!("\\\\x{}", hex::encode(log.key.key().0));
-                    let value_str = format!("\\\\x{}", hex::encode(log.value.0));
-                    let row = format!(
-                        "{}|{}|{}|{}|{}|{}|{}|{}|{}\n",
-                        hashed_key_str,
-                        address_str,
-                        key_str,
-                        value_str,
-                        operation_number,
-                        tx_hash_str,
-                        block_number,
-                        now,
-                        now
-                    );
-                    bytes.extend_from_slice(row.as_bytes());
-
-                    operation_number += 1;
-                }
-            }
-            copy.send(bytes).await.unwrap();
-            copy.finish().await.unwrap();
-        })
+        let operation_number = sqlx::query!(
+            "SELECT COUNT(*) as \"count!\" FROM storage_logs WHERE miniblock_number = $1",
+            block_number.0 as i64
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap()
+        .count as u32;
+
+        self.insert_storage_logs_inner(block_number, logs, operation_number)
+            .await;
     }
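A note on the `\\x` prefixes written into the COPY buffer above: COPY's text format unescapes `\\` to `\`, so each field arrives at Postgres as `\x<hex>`, the hex input form for `bytea`. One '|'-delimited row in isolation (using the `hex` crate, as the removed code did; this helper is illustrative and not part of the patch):

fn copy_row(hashed_key: &[u8], value: &[u8], operation_number: u32) -> String {
    // "\\\\x" in Rust source is the two characters `\\`, which COPY's text
    // format unescapes to a single `\`, leaving `\x<hex>` for bytea parsing.
    format!(
        "\\\\x{}|\\\\x{}|{}\n",
        hex::encode(hashed_key),
        hex::encode(value),
        operation_number
    )
}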
-    pub fn rollback_storage(&mut self, block_number: MiniblockNumber) {
-        async_std::task::block_on(async {
-            vlog::info!("fetching keys that were changed after given block number");
-            let modified_keys: Vec<H256> = sqlx::query!(
-                "SELECT DISTINCT ON (hashed_key) hashed_key FROM
-                (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn",
-                block_number.0 as i64
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|row| H256::from_slice(&row.hashed_key))
-            .collect();
-            vlog::info!("loaded {:?} keys", modified_keys.len());
+    /// Rolls back storage to the specified point in time.
+    pub async fn rollback_storage(&mut self, last_miniblock_to_keep: MiniblockNumber) {
+        let stage_start = Instant::now();
+        let modified_keys = self
+            .modified_keys_since_miniblock(last_miniblock_to_keep)
+            .await;
+        vlog::info!(
+            "Loaded {} keys changed after miniblock #{last_miniblock_to_keep} in {:?}",
+            modified_keys.len(),
+            stage_start.elapsed()
+        );

-            for key in modified_keys {
-                let previous_value: Option<H256> = sqlx::query!(
-                    "select value from storage_logs where hashed_key = $1 and miniblock_number <= $2 order by miniblock_number desc, operation_number desc limit 1",
-                    key.as_bytes(),
-                    block_number.0 as i64
-                )
-                .fetch_optional(self.storage.conn())
-                .await
-                .unwrap()
-                .map(|r| H256::from_slice(&r.value));
-                match previous_value {
-                    None => {
-                        sqlx::query!("delete from storage where hashed_key = $1", key.as_bytes(),)
-                            .execute(self.storage.conn())
-                            .await
-                            .unwrap()
-                    }
-                    Some(val) => sqlx::query!(
-                        "update storage set value = $1 where hashed_key = $2",
-                        val.as_bytes(),
-                        key.as_bytes(),
-                    )
-                    .execute(self.storage.conn())
-                    .await
-                    .unwrap(),
-                };
+        let stage_start = Instant::now();
+        let prev_values = self
+            .get_storage_values(&modified_keys, last_miniblock_to_keep)
+            .await;
+        vlog::info!(
+            "Loaded previous storage values for modified keys in {:?}",
+            stage_start.elapsed()
+        );
+
+        let stage_start = Instant::now();
+        let mut keys_to_delete = vec![];
+        let mut keys_to_update = vec![];
+        let mut values_to_update = vec![];
+        for (key, maybe_value) in &prev_values {
+            if let Some(prev_value) = maybe_value {
+                keys_to_update.push(key.as_bytes());
+                values_to_update.push(prev_value.as_bytes());
+            } else {
+                keys_to_delete.push(key.as_bytes());
             }
-        })
+        }
+        vlog::info!(
+            "Created revert plan (keys to update: {}, to delete: {}) in {:?}",
+            keys_to_update.len(),
+            keys_to_delete.len(),
+            stage_start.elapsed()
+        );
+
+        let stage_start = Instant::now();
+        sqlx::query!(
+            "DELETE FROM storage WHERE hashed_key = ANY($1)",
+            &keys_to_delete as &[&[u8]],
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+        vlog::info!(
+            "Removed {} keys in {:?}",
+            keys_to_delete.len(),
+            stage_start.elapsed()
+        );
+
+        let stage_start = Instant::now();
+        sqlx::query!(
+            "UPDATE storage SET value = u.value \
+            FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) \
+            WHERE u.key = hashed_key",
+            &keys_to_update as &[&[u8]],
+            &values_to_update as &[&[u8]],
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
+        vlog::info!(
+            "Updated {} keys to previous values in {:?}",
+            keys_to_update.len(),
+            stage_start.elapsed()
+        );
     }

-    pub fn rollback_storage_logs(&mut self, block_number: MiniblockNumber) {
-        async_std::task::block_on(async {
-            sqlx::query!(
-                "DELETE FROM storage_logs WHERE miniblock_number > $1",
-                block_number.0 as i64
-            )
-            .execute(self.storage.conn())
-            .await
-            .unwrap();
-        })
+    /// Returns all storage keys that were modified after the specified miniblock.
+    async fn modified_keys_since_miniblock(
+        &mut self,
+        miniblock_number: MiniblockNumber,
+    ) -> Vec<H256> {
+        sqlx::query!(
+            "SELECT DISTINCT ON (hashed_key) hashed_key FROM \
+            (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn",
+            miniblock_number.0 as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| H256::from_slice(&row.hashed_key))
+        .collect()
+    }
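`rollback_storage` above replaces a query-per-key loop with two set-based statements; the bulk restore joins the table against a pair of unnested arrays. The same pattern in isolation, using a runtime-checked query instead of the `query!` macro (an illustrative function, assuming an open `sqlx::PgConnection` and the same `storage` table):

async fn restore_values(
    conn: &mut sqlx::PgConnection,
    keys: Vec<Vec<u8>>,
    values: Vec<Vec<u8>>,
) -> sqlx::Result<()> {
    // One round trip updates every key: UNNEST turns the two parallel arrays
    // into a virtual (key, value) table that the UPDATE joins against.
    sqlx::query(
        "UPDATE storage SET value = u.value \
         FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) \
         WHERE u.key = hashed_key",
    )
    .bind(keys)
    .bind(values)
    .execute(conn)
    .await?;
    Ok(())
}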
+
+    /// Removes all storage logs with a miniblock number strictly greater than the specified `block_number`.
+    pub async fn rollback_storage_logs(&mut self, block_number: MiniblockNumber) {
+        sqlx::query!(
+            "DELETE FROM storage_logs WHERE miniblock_number > $1",
+            block_number.0 as i64
+        )
+        .execute(self.storage.conn())
+        .await
+        .unwrap();
     }

-    pub fn is_contract_deployed_at_address(&mut self, address: Address) -> bool {
+    pub async fn is_contract_deployed_at_address(&mut self, address: Address) -> bool {
         let hashed_key = get_code_key(&address).hashed_key();
-        async_std::task::block_on(async {
-            let count = sqlx::query!(
-                r#"
-                    SELECT COUNT(*) as "count!"
-                    FROM (
-                        SELECT * FROM storage_logs
-                        WHERE storage_logs.hashed_key = $1
-                        ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC
-                        LIMIT 1
-                    ) sl
-                    WHERE sl.value != $2
-                "#,
-                hashed_key.as_bytes(),
-                FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(),
-            )
-            .fetch_one(self.storage.conn())
-            .await
-            .unwrap()
-            .count;
-            count > 0
-        })
+        let row = sqlx::query!(
+            "SELECT COUNT(*) as \"count!\" \
+            FROM (\
+                SELECT * FROM storage_logs \
+                WHERE storage_logs.hashed_key = $1 \
+                ORDER BY storage_logs.miniblock_number DESC, storage_logs.operation_number DESC \
+                LIMIT 1\
+            ) sl \
+            WHERE sl.value != $2",
+            hashed_key.as_bytes(),
+            FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(),
+        )
+        .fetch_one(self.storage.conn())
+        .await
+        .unwrap();
+
+        row.count > 0
     }

-    pub fn get_touched_slots_for_l1_batch(
+    /// Returns latest values for all [`StorageKey`]s written to in the specified L1 batch
+    /// judging by storage logs (i.e., not taking deduplication logic into account).
+    pub async fn get_touched_slots_for_l1_batch(
         &mut self,
         l1_batch_number: L1BatchNumber,
     ) -> HashMap<StorageKey, H256> {
-        async_std::task::block_on(async {
-            let storage_logs = sqlx::query!(
-                "
-                    SELECT address, key, value
-                    FROM storage_logs
-                    WHERE miniblock_number BETWEEN (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1)
-                        AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1)
-                    ORDER BY miniblock_number, operation_number
-                ",
-                l1_batch_number.0 as i64
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap();
-
-            let mut touched_slots = HashMap::new();
-            for storage_log in storage_logs.into_iter() {
-                touched_slots.insert(
-                    StorageKey::new(
-                        AccountTreeId::new(Address::from_slice(&storage_log.address)),
-                        H256::from_slice(&storage_log.key),
-                    ),
-                    H256::from_slice(&storage_log.value),
-                );
-            }
-            touched_slots
-        })
+        let rows = sqlx::query!(
+            "SELECT address, key, value \
+            FROM storage_logs \
+            WHERE miniblock_number BETWEEN \
+                (SELECT MIN(number) FROM miniblocks WHERE l1_batch_number = $1) \
+                AND (SELECT MAX(number) FROM miniblocks WHERE l1_batch_number = $1) \
+            ORDER BY miniblock_number, operation_number",
+            l1_batch_number.0 as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
+
+        let touched_slots = rows.into_iter().map(|row| {
+            let key = StorageKey::new(
+                AccountTreeId::new(Address::from_slice(&row.address)),
+                H256::from_slice(&row.key),
+            );
+            (key, H256::from_slice(&row.value))
+        });
+        touched_slots.collect()
     }

-    pub fn get_storage_logs_for_revert(
+    /// Returns (hashed) storage keys and the corresponding values that need to be applied to a storage
+    /// in order to revert it to the specified L1 batch. Deduplication is taken into account.
+    pub async fn get_storage_logs_for_revert(
         &mut self,
         l1_batch_number: L1BatchNumber,
-    ) -> Vec<(H256, Option<H256>)> {
-        async_std::task::block_on(async {
-            let miniblock_number = match self
-                .storage
-                .blocks_dal()
-                .get_miniblock_range_of_l1_batch(l1_batch_number)
-            {
-                None => return Vec::new(),
-                Some((_, number)) => number,
-            };
-
-            vlog::info!("fetching keys that were changed after given block number");
-            let modified_keys: Vec<H256> = sqlx::query!(
-                "SELECT DISTINCT ON (hashed_key) hashed_key FROM
-                (SELECT * FROM storage_logs WHERE miniblock_number > $1) inn",
-                miniblock_number.0 as i64
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|row| H256::from_slice(&row.hashed_key))
-            .collect();
-            vlog::info!("loaded {:?} keys", modified_keys.len());
+    ) -> HashMap<H256, Option<H256>> {
+        let miniblock_range = self
+            .storage
+            .blocks_dal()
+            .get_miniblock_range_of_l1_batch(l1_batch_number)
+            .await;
+        let Some((_, last_miniblock)) = miniblock_range else {
+            return HashMap::new();
+        };

-            let mut result: Vec<(H256, Option<H256>)> = vec![];
+        let stage_start = Instant::now();
+        let mut modified_keys = self.modified_keys_since_miniblock(last_miniblock).await;
+        let modified_keys_count = modified_keys.len();
+        vlog::info!(
+            "Fetched {modified_keys_count} keys changed after miniblock #{last_miniblock} in {:?}",
+            stage_start.elapsed()
+        );

-            for key in modified_keys {
-                let initially_written_at: Option<L1BatchNumber> = sqlx::query!(
-                    "
-                        SELECT l1_batch_number FROM initial_writes
-                        WHERE hashed_key = $1
-                    ",
-                    key.as_bytes(),
-                )
-                .fetch_optional(self.storage.conn())
-                .await
-                .unwrap()
-                .map(|row| L1BatchNumber(row.l1_batch_number as u32));
-                match initially_written_at {
-                    // Key isn't written to the storage - nothing to rollback.
-                    None => continue,
-                    // Key was initially written, it's needed to remove it.
-                    Some(initially_written_at) if initially_written_at > l1_batch_number => {
-                        result.push((key, None));
-                    }
-                    // Key was rewritten, it's needed to restore the previous value.
-                    Some(_) => {
-                        let previous_value: Vec<u8> = sqlx::query!(
-                            "
-                                SELECT value FROM storage_logs
-                                WHERE hashed_key = $1 AND miniblock_number <= $2
-                                ORDER BY miniblock_number DESC, operation_number DESC
-                                LIMIT 1
-                            ",
-                            key.as_bytes(),
-                            miniblock_number.0 as i64
-                        )
-                        .fetch_one(self.storage.conn())
-                        .await
-                        .unwrap()
-                        .value;
-                        result.push((key, Some(H256::from_slice(&previous_value))));
-                    }
+        // We need to filter `modified_keys` using the `initial_writes` table (i.e., take dedup logic
+        // into account). Some keys that have `storage_logs` entries are actually never written to
+        // as per `initial_writes`, so if we return such keys from this method, it will lead to
+        // the incorrect state after revert.
+        let stage_start = Instant::now();
+        let l1_batch_by_key = self.get_l1_batches_for_initial_writes(&modified_keys).await;
+        vlog::info!(
+            "Loaded initial write info for modified keys in {:?}",
+            stage_start.elapsed()
+        );
+
+        let stage_start = Instant::now();
+        let mut output = HashMap::with_capacity(modified_keys.len());
+        modified_keys.retain(|key| {
+            match l1_batch_by_key.get(key) {
+                None => {
+                    // Key is completely deduped. It should not be present in the output map.
+                    false
                 }
-                if result.len() % 1000 == 0 {
-                    vlog::info!("processed {:?} values", result.len());
+                Some(write_batch) if *write_batch > l1_batch_number => {
+                    // Key was initially written to after the specified L1 batch.
+                    output.insert(*key, None);
+                    false
                 }
+                Some(_) => true,
             }
+        });
+        vlog::info!(
+            "Filtered modified keys per initial writes in {:?}",
+            stage_start.elapsed()
+        );

-            result
-        })
+        let deduped_count = modified_keys_count - l1_batch_by_key.len();
+        vlog::info!(
+            "Keys to update: {update_count}, to delete: {delete_count}; {deduped_count} modified keys \
+             are deduped and will be ignored",
+            update_count = modified_keys.len(),
+            delete_count = l1_batch_by_key.len() - modified_keys.len()
+        );
+
+        let stage_start = Instant::now();
+        let prev_values_for_updated_keys = self
+            .get_storage_values(&modified_keys, last_miniblock)
+            .await;
+        vlog::info!(
+            "Loaded previous values for {} keys in {:?}",
+            prev_values_for_updated_keys.len(),
+            stage_start.elapsed()
+        );
+        output.extend(prev_values_for_updated_keys);
+        output
     }

-    pub fn get_previous_storage_values(
+    async fn get_l1_batches_for_initial_writes(
         &mut self,
-        hashed_keys: Vec<H256>,
-        l1_batch_number: L1BatchNumber,
-    ) -> HashMap<H256, H256> {
-        async_std::task::block_on(async {
-            let hashed_keys: Vec<_> = hashed_keys.into_iter().map(|key| key.0.to_vec()).collect();
-            let (miniblock_number, _) = self
-                .storage
-                .blocks_dal()
-                .get_miniblock_range_of_l1_batch(l1_batch_number)
-                .unwrap();
-            sqlx::query!(
-                r#"
-                    SELECT u.hashed_key as "hashed_key!",
-                        (SELECT value FROM storage_logs
-                        WHERE hashed_key = u.hashed_key AND miniblock_number < $2
-                        ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as "value?"
-                    FROM UNNEST($1::bytea[]) AS u(hashed_key)
-                "#,
-                &hashed_keys,
-                miniblock_number.0 as i64
-            )
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
+        hashed_keys: &[H256],
+    ) -> HashMap<H256, L1BatchNumber> {
+        let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect();
+
+        let rows = sqlx::query!(
+            "SELECT hashed_key, l1_batch_number FROM initial_writes \
+            WHERE hashed_key = ANY($1::bytea[])",
+            &hashed_keys as &[&[u8]],
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
+
+        rows.into_iter()
             .map(|row| {
                 (
                     H256::from_slice(&row.hashed_key),
-                    row.value
-                        .map(|value| H256::from_slice(&value))
-                        .unwrap_or_else(H256::zero),
+                    L1BatchNumber(row.l1_batch_number as u32),
                 )
             })
             .collect()
-        })
+    }
+
+    /// Gets previous values for the specified storage keys before the specified L1 batch number.
+    ///
+    /// # Return value
+    ///
+    /// The returned map is guaranteed to contain all unique keys from `hashed_keys`.
+    pub async fn get_previous_storage_values(
+        &mut self,
+        hashed_keys: &[H256],
+        next_l1_batch: L1BatchNumber,
+    ) -> HashMap<H256, Option<H256>> {
+        let (miniblock_number, _) = self
+            .storage
+            .blocks_dal()
+            .get_miniblock_range_of_l1_batch(next_l1_batch)
+            .await
+            .unwrap();
+
+        if miniblock_number == MiniblockNumber(0) {
+            hashed_keys.iter().copied().map(|key| (key, None)).collect()
+        } else {
+            self.get_storage_values(hashed_keys, miniblock_number - 1)
+                .await
+        }
     }
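The `retain` closure in `get_storage_logs_for_revert` above encodes a three-way decision per modified key. The same decision table as a standalone function (illustrative types, not part of the patch):

#[derive(Debug, PartialEq)]
enum RevertAction {
    // No entry in `initial_writes`: the write was fully deduped; skip the key.
    Skip,
    // First written after the target batch: the key must be deleted.
    Delete,
    // First written at or before the target batch: restore its previous value.
    Restore,
}

fn classify(initial_write_batch: Option<u32>, target_batch: u32) -> RevertAction {
    match initial_write_batch {
        None => RevertAction::Skip,
        Some(batch) if batch > target_batch => RevertAction::Delete,
        Some(_) => RevertAction::Restore,
    }
}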
+
+    /// Returns current values for the specified keys at the specified `miniblock_number`.
+    async fn get_storage_values(
+        &mut self,
+        hashed_keys: &[H256],
+        miniblock_number: MiniblockNumber,
+    ) -> HashMap<H256, Option<H256>> {
+        let hashed_keys: Vec<_> = hashed_keys.iter().map(H256::as_bytes).collect();
+
+        let rows = sqlx::query!(
+            "SELECT u.hashed_key as \"hashed_key!\", \
+                (SELECT value FROM storage_logs \
+                WHERE hashed_key = u.hashed_key AND miniblock_number <= $2 \
+                ORDER BY miniblock_number DESC, operation_number DESC LIMIT 1) as \"value?\" \
+            FROM UNNEST($1::bytea[]) AS u(hashed_key)",
+            &hashed_keys as &[&[u8]],
+            miniblock_number.0 as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap();
+
+        rows.into_iter()
+            .map(|row| {
+                let key = H256::from_slice(&row.hashed_key);
+                let value = row.value.map(|value| H256::from_slice(&value));
+                (key, value)
+            })
+            .collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{tests::create_miniblock_header, ConnectionPool};
+    use db_test_macro::db_test;
+    use zksync_contracts::BaseSystemContractsHashes;
+    use zksync_types::{
+        block::{BlockGasCount, L1BatchHeader},
+        zk_evm::aux_structures::{LogQuery, Timestamp},
+        U256,
+    };
+
+    async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec<StorageLog>) {
+        let mut header = L1BatchHeader::new(
+            L1BatchNumber(number),
+            0,
+            Address::default(),
+            BaseSystemContractsHashes::default(),
+        );
+        header.is_finished = true;
+        conn.blocks_dal()
+            .insert_l1_batch(&header, BlockGasCount::default())
+            .await;
+        conn.blocks_dal()
+            .insert_miniblock(&create_miniblock_header(number))
+            .await;
+
+        let logs = [(H256::zero(), logs)];
+        conn.storage_logs_dal()
+            .insert_storage_logs(MiniblockNumber(number), &logs)
+            .await;
+        conn.storage_dal().apply_storage_logs(&logs).await;
+        conn.blocks_dal()
+            .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(number))
+            .await;
+    }
+
+    #[db_test(dal_crate)]
+    async fn inserting_storage_logs(pool: ConnectionPool) {
+        let mut conn = pool.access_storage().await;
+
+        conn.blocks_dal()
+            .delete_miniblocks(MiniblockNumber(0))
+            .await;
+        conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await;
+
+        let account = AccountTreeId::new(Address::repeat_byte(1));
+        let first_key = StorageKey::new(account, H256::zero());
+        let second_key = StorageKey::new(account, H256::from_low_u64_be(1));
+        let log = StorageLog::new_write_log(first_key, H256::repeat_byte(1));
+        let other_log = StorageLog::new_write_log(second_key, H256::repeat_byte(2));
+        insert_miniblock(&mut conn, 1, vec![log, other_log]).await;
+
+        let touched_slots = conn
+            .storage_logs_dal()
+            .get_touched_slots_for_l1_batch(L1BatchNumber(1))
+            .await;
+        assert_eq!(touched_slots.len(), 2);
+        assert_eq!(touched_slots[&first_key], H256::repeat_byte(1));
+        assert_eq!(touched_slots[&second_key], H256::repeat_byte(2));
+
+        // Add more logs and check log ordering.
+        let third_log = StorageLog::new_write_log(first_key, H256::repeat_byte(3));
+        let more_logs = [(H256::repeat_byte(1), vec![third_log])];
+        conn.storage_logs_dal()
+            .append_storage_logs(MiniblockNumber(1), &more_logs)
+            .await;
+        conn.storage_dal().apply_storage_logs(&more_logs).await;
+
+        let touched_slots = conn
+            .storage_logs_dal()
+            .get_touched_slots_for_l1_batch(L1BatchNumber(1))
+            .await;
+        assert_eq!(touched_slots.len(), 2);
+        assert_eq!(touched_slots[&first_key], H256::repeat_byte(3));
+        assert_eq!(touched_slots[&second_key], H256::repeat_byte(2));
+
+        test_rollback(&mut conn, first_key, second_key).await;
+    }
+
+    async fn test_rollback(
+        conn: &mut StorageProcessor<'_>,
+        key: StorageKey,
+        second_key: StorageKey,
+    ) {
+        let new_account = AccountTreeId::new(Address::repeat_byte(2));
+        let new_key = StorageKey::new(new_account, H256::zero());
+        let log = StorageLog::new_write_log(key, H256::repeat_byte(0xff));
+        let other_log = StorageLog::new_write_log(second_key, H256::zero());
+        let new_key_log = StorageLog::new_write_log(new_key, H256::repeat_byte(0xfe));
+        let logs = vec![log, other_log, new_key_log];
+        insert_miniblock(conn, 2, logs).await;
+
+        let value = conn.storage_dal().get_by_key(&key).await.unwrap();
+        assert_eq!(value, H256::repeat_byte(0xff));
+        let value = conn.storage_dal().get_by_key(&second_key).await.unwrap();
+        assert_eq!(value, H256::zero());
+        let value = conn.storage_dal().get_by_key(&new_key).await.unwrap();
+        assert_eq!(value, H256::repeat_byte(0xfe));
+
+        let prev_keys = vec![key.hashed_key(), new_key.hashed_key(), H256::zero()];
+        let prev_values = conn
+            .storage_logs_dal()
+            .get_previous_storage_values(&prev_keys, L1BatchNumber(2))
+            .await;
+        assert_eq!(prev_values.len(), 3);
+        assert_eq!(prev_values[&prev_keys[0]], Some(H256::repeat_byte(3)));
+        assert_eq!(prev_values[&prev_keys[1]], None);
+        assert_eq!(prev_values[&prev_keys[2]], None);
+
+        conn.storage_logs_dal()
+            .rollback_storage(MiniblockNumber(1))
+            .await;
+
+        let value = conn.storage_dal().get_by_key(&key).await.unwrap();
+        assert_eq!(value, H256::repeat_byte(3));
+        let value = conn.storage_dal().get_by_key(&second_key).await.unwrap();
+        assert_eq!(value, H256::repeat_byte(2));
+        let value = conn.storage_dal().get_by_key(&new_key).await;
+        assert!(value.is_none());
+    }
+
+    #[db_test(dal_crate)]
+    async fn getting_storage_logs_for_revert(pool: ConnectionPool) {
+        let mut conn = pool.access_storage().await;
+
+        conn.blocks_dal()
+            .delete_miniblocks(MiniblockNumber(0))
+            .await;
+        conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await;
+
+        let account = AccountTreeId::new(Address::repeat_byte(1));
+        let logs: Vec<_> = (0_u8..10)
+            .map(|i| {
+                let key = StorageKey::new(account, H256::from_low_u64_be(u64::from(i)));
+                StorageLog::new_write_log(key, H256::repeat_byte(i))
+            })
+            .collect();
+        insert_miniblock(&mut conn, 1, logs.clone()).await;
+        let queries: Vec<_> = logs.iter().map(write_log_to_query).collect();
+        conn.storage_logs_dedup_dal()
+            .insert_initial_writes(L1BatchNumber(1), &queries)
+            .await;
+
+        let new_logs: Vec<_> = (5_u64..20)
+            .map(|i| {
+                let key = StorageKey::new(account, H256::from_low_u64_be(i));
+                StorageLog::new_write_log(key, H256::from_low_u64_be(i))
+            })
+            .collect();
+        insert_miniblock(&mut conn, 2, new_logs.clone()).await;
+        let new_queries: Vec<_> = new_logs[5..].iter().map(write_log_to_query).collect();
+        conn.storage_logs_dedup_dal()
+            .insert_initial_writes(L1BatchNumber(2), &new_queries)
+            .await;
+
+        let logs_for_revert = conn
+            .storage_logs_dal()
+            .get_storage_logs_for_revert(L1BatchNumber(1))
+            .await;
+        assert_eq!(logs_for_revert.len(), 15); // 5 updated + 10 new keys
+        for log in &logs[5..] {
+            let prev_value = logs_for_revert[&log.key.hashed_key()].unwrap();
+            assert_eq!(prev_value, log.value);
+        }
+        for log in &new_logs[5..] {
+            assert!(logs_for_revert[&log.key.hashed_key()].is_none());
+        }
+    }
+
+    fn write_log_to_query(log: &StorageLog) -> LogQuery {
+        LogQuery {
+            timestamp: Timestamp(0),
+            tx_number_in_block: 0,
+            aux_byte: 0,
+            shard_id: 0,
+            address: *log.key.address(),
+            key: U256::from_big_endian(log.key.key().as_bytes()),
+            read_value: U256::zero(),
+            written_value: U256::from_big_endian(log.value.as_bytes()),
+            rw_flag: true,
+            rollback: false,
+            is_service: false,
+        }
+    }
+
+    #[db_test(dal_crate)]
+    async fn reverting_keys_without_initial_write(pool: ConnectionPool) {
+        let mut conn = pool.access_storage().await;
+
+        conn.blocks_dal()
+            .delete_miniblocks(MiniblockNumber(0))
+            .await;
+        conn.blocks_dal().delete_l1_batches(L1BatchNumber(0)).await;
+
+        let account = AccountTreeId::new(Address::repeat_byte(1));
+        let mut logs: Vec<_> = [0_u8, 1, 2, 3]
+            .iter()
+            .map(|&i| {
+                let key = StorageKey::new(account, H256::from_low_u64_be(u64::from(i)));
+                StorageLog::new_write_log(key, H256::repeat_byte(i % 3))
+            })
+            .collect();
+
+        for l1_batch in [1, 2] {
+            if l1_batch == 2 {
+                for log in &mut logs[1..] {
+                    log.value = H256::repeat_byte(0xff);
+                }
+            }
+            insert_miniblock(&mut conn, l1_batch, logs.clone()).await;
+
+            // Pretend that dedup logic eliminates all writes with zero values.
+            let queries: Vec<_> = logs
+                .iter()
+                .filter_map(|log| (!log.value.is_zero()).then(|| write_log_to_query(log)))
+                .collect();
+            assert!(queries.len() < logs.len());
+            conn.storage_logs_dedup_dal()
+                .insert_initial_writes(L1BatchNumber(l1_batch), &queries)
+                .await;
+        }
+
+        let logs_for_revert = conn
+            .storage_logs_dal()
+            .get_storage_logs_for_revert(L1BatchNumber(1))
+            .await;
+        assert_eq!(logs_for_revert.len(), 3);
+        for (i, log) in logs.iter().enumerate() {
+            let hashed_key = log.key.hashed_key();
+            match i {
+                // Key is deduped.
+                0 => assert!(!logs_for_revert.contains_key(&hashed_key)),
+                // Key is present in both batches as per `storage_logs` and `initial_writes`.
+                1 | 2 => assert!(logs_for_revert[&hashed_key].is_some()),
+                // Key is present in both batches as per `storage_logs`, but `initial_writes`
+                // indicates that the first write was deduped.
+                3 => assert!(logs_for_revert[&hashed_key].is_none()),
+                _ => unreachable!("we only have 4 keys"),
+            }
+        }
+    }
+}
diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs
index d54a2e2970c2..751f81d135ba 100644
--- a/core/lib/dal/src/storage_logs_dedup_dal.rs
+++ b/core/lib/dal/src/storage_logs_dedup_dal.rs
@@ -1,22 +1,23 @@
 use crate::StorageProcessor;
 use sqlx::types::chrono::Utc;
 use std::collections::HashSet;
-use vm::zk_evm::aux_structures::LogQuery;
-use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256};
+use zksync_types::{
+    zk_evm::aux_structures::LogQuery, AccountTreeId, Address, L1BatchNumber, StorageKey, H256,
+};
 use zksync_utils::u256_to_h256;

 #[derive(Debug)]
 pub struct StorageLogsDedupDal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }

 impl StorageLogsDedupDal<'_, '_> {
-    pub fn insert_protective_reads(
+    pub async fn insert_protective_reads(
         &mut self,
         l1_batch_number: L1BatchNumber,
         read_logs: &[LogQuery],
     ) {
-        async_std::task::block_on(async {
+        {
             let mut copy = self
                 .storage
                 .conn()
@@ -40,15 +41,15 @@ impl StorageLogsDedupDal<'_, '_> {
             }
             copy.send(bytes).await.unwrap();
             copy.finish().await.unwrap();
-        })
+        }
     }

-    pub fn insert_initial_writes(
+    pub async fn insert_initial_writes(
         &mut self,
         l1_batch_number: L1BatchNumber,
         write_logs: &[LogQuery],
     ) {
-        async_std::task::block_on(async {
+        {
             let hashed_keys: Vec<_> = write_logs
                 .iter()
                 .map(|log| {
@@ -68,14 +69,14 @@
             .execute(self.storage.conn())
             .await
             .unwrap();
-        })
+        }
     }

-    pub fn get_protective_reads_for_l1_batch(
+    pub async fn get_protective_reads_for_l1_batch(
         &mut self,
         l1_batch_number: L1BatchNumber,
     ) -> HashSet<StorageKey> {
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "
                 SELECT address, key FROM protective_reads
@@ -94,6 +95,6 @@
                 )
             })
             .collect()
-        })
+        }
     }
 }
diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs
index e1f4491b5861..63e7cae24265 100644
--- a/core/lib/dal/src/storage_web3_dal.rs
+++ b/core/lib/dal/src/storage_web3_dal.rs
@@ -1,68 +1,55 @@
-use crate::{SqlxError, StorageProcessor};
 use std::time::Instant;
+
 use zksync_types::{
-    api::BlockId,
     get_code_key, get_nonce_key,
     utils::{decompose_full_nonce, storage_key_for_standard_token_balance},
-    AccountTreeId, Address, StorageKey, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256,
+    AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey,
+    FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256,
 };
 use zksync_utils::h256_to_u256;
-use zksync_web3_decl::error::Web3Error;
+
+use crate::{SqlxError, StorageProcessor};

 #[derive(Debug)]
 pub struct StorageWeb3Dal<'a, 'c> {
-    pub storage: &'a mut StorageProcessor<'c>,
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
 }

 impl StorageWeb3Dal<'_, '_> {
-    pub fn get_address_historical_nonce(
+    pub async fn get_address_historical_nonce(
         &mut self,
         address: Address,
-        block_id: BlockId,
-    ) -> Result<Result<U256, Web3Error>, SqlxError> {
+        block_number: MiniblockNumber,
+    ) -> Result<U256, SqlxError> {
         let nonce_key = get_nonce_key(&address);
-        let nonce = self.get_historical_value(&nonce_key, block_id)?.map(|n| {
-            let full_nonce = h256_to_u256(n);
-            decompose_full_nonce(full_nonce).0
-        });
-        Ok(nonce)
+        let nonce_value = self
+            .get_historical_value_unchecked(&nonce_key, block_number)
+            .await?;
+        let full_nonce = h256_to_u256(nonce_value);
+        Ok(decompose_full_nonce(full_nonce).0)
     }

-    pub fn standard_token_historical_balance(
+    pub async fn standard_token_historical_balance(
         &mut self,
         token_id: AccountTreeId,
         account_id: AccountTreeId,
-        block_id: BlockId,
-    ) -> Result<Result<U256, Web3Error>, SqlxError> {
+        block_number: MiniblockNumber,
+    ) -> Result<U256, SqlxError> {
         let key = storage_key_for_standard_token_balance(token_id, account_id.address());
-
-        let balance = self.get_historical_value(&key, block_id)?;
-        Ok(balance.map(h256_to_u256))
-    }
-
-    pub fn get_historical_value(
-        &mut self,
-        key: &StorageKey,
-        block_id: BlockId,
-    ) -> Result<Result<H256, Web3Error>, SqlxError> {
-        let block_number = self.storage.blocks_web3_dal().resolve_block_id(block_id)?;
-        match block_number {
-            Ok(block_number) => {
-                let value = self.get_historical_value_unchecked(key, block_number)?;
-                Ok(Ok(value))
-            }
-            Err(err) => Ok(Err(err)),
-        }
+        let balance = self
+            .get_historical_value_unchecked(&key, block_number)
+            .await?;
+        Ok(h256_to_u256(balance))
     }

     /// This method does not check if a block with this number exists in the database.
     /// It will return the current value if the block is in the future.
-    pub fn get_historical_value_unchecked(
+    pub async fn get_historical_value_unchecked(
         &mut self,
         key: &StorageKey,
-        block_number: zksync_types::MiniblockNumber,
+        block_number: MiniblockNumber,
     ) -> Result<H256, SqlxError> {
-        async_std::task::block_on(async {
+        {
             let started_at = Instant::now();
             // We need to proper distinguish if the value is zero or None
             // for the VM to correctly determine initial writes.
@@ -88,73 +75,60 @@ impl StorageWeb3Dal<'_, '_> {
             metrics::histogram!("dal.request", started_at.elapsed(), "method" => "get_historical_value_unchecked");

             result
-        })
+        }
     }

-    pub fn is_write_initial(
+    /// Gets the L1 batch number that the miniblock has now or will have in the future (provided
+    /// that the node will operate correctly). Assumes that the miniblock is present in the DB;
+    /// this is not checked, and if this is false, the returned value will be meaningless.
+    pub async fn get_provisional_l1_batch_number_of_miniblock_unchecked(
         &mut self,
-        key: &StorageKey,
-        block_number: zksync_types::MiniblockNumber,
-        consider_new_l1_batch: bool,
-    ) -> Result<bool, SqlxError> {
-        async_std::task::block_on(async {
-            let started_at = Instant::now();
-            let row = sqlx::query!(
-                r#"
-                    SELECT (SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1) as "initial_write_l1_batch_number?",
-                        (SELECT miniblocks.l1_batch_number FROM miniblocks WHERE number = $2) as "current_l1_batch_number?"
-                "#,
-                key.hashed_key().0.to_vec(),
-                block_number.0 as i64
-            )
-            .fetch_one(self.storage.conn())
-            .await?;
-            // Note: if `row.current_l1_batch_number` is `None` it means
-            // that the l1 batch that the miniblock is included in isn't sealed yet.
-            let is_initial = match (
-                row.current_l1_batch_number,
-                row.initial_write_l1_batch_number,
-            ) {
-                (_, None) => true,
-                (Some(current_l1_batch_number), Some(initial_write_l1_batch_number)) => {
-                    if consider_new_l1_batch {
-                        current_l1_batch_number < initial_write_l1_batch_number
-                    } else {
-                        current_l1_batch_number <= initial_write_l1_batch_number
-                    }
-                }
-                (None, Some(_initial_write_l1_batch_number)) => false,
-            };
-            metrics::histogram!("dal.request", started_at.elapsed(), "method" => "is_write_initial");
+        miniblock_number: MiniblockNumber,
+    ) -> Result<L1BatchNumber, SqlxError> {
+        let row = sqlx::query!(
+            "SELECT \
+                (SELECT l1_batch_number FROM miniblocks WHERE number = $1) as \"block_batch?\", \
+                (SELECT MAX(number) + 1 FROM l1_batches) as \"max_batch?\"",
+            miniblock_number.0 as i64
+        )
+        .fetch_one(self.storage.conn())
+        .await?;

-            Ok(is_initial)
-        })
+        let batch_number = row.block_batch.or(row.max_batch).unwrap_or(0);
+        Ok(L1BatchNumber(batch_number as u32))
     }

-    pub fn get_contract_code(
+    pub async fn get_l1_batch_number_for_initial_write(
         &mut self,
-        address: Address,
-        block_id: BlockId,
-    ) -> Result<Result<Option<Vec<u8>>, Web3Error>, SqlxError> {
-        let block_number = self.storage.blocks_web3_dal().resolve_block_id(block_id)?;
-        match block_number {
-            Ok(block_number) => {
-                let code = self.get_contract_code_unchecked(address, block_number)?;
-                Ok(Ok(code))
-            }
-            Err(err) => Ok(Err(err)),
-        }
+        key: &StorageKey,
+    ) -> Result<Option<L1BatchNumber>, SqlxError> {
+        let started_at = Instant::now();
+        let hashed_key = key.hashed_key();
+        let row = sqlx::query!(
+            "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1",
+            hashed_key.as_bytes(),
+        )
+        .fetch_optional(self.storage.conn())
+        .await?;
+
+        let l1_batch_number = row.map(|record| L1BatchNumber(record.l1_batch_number as u32));
+        metrics::histogram!(
+            "dal.request",
+            started_at.elapsed(),
+            "method" => "get_l1_batch_number_for_initial_write"
+        );
+        Ok(l1_batch_number)
     }

     /// This method doesn't check if block with number equals to `block_number`
     /// is present in the database. For such blocks `None` will be returned.
-    pub fn get_contract_code_unchecked(
+    pub async fn get_contract_code_unchecked(
         &mut self,
         address: Address,
-        block_number: zksync_types::MiniblockNumber,
+        block_number: MiniblockNumber,
     ) -> Result<Option<Vec<u8>>, SqlxError> {
         let hashed_key = get_code_key(&address).hashed_key();
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "
                 SELECT bytecode FROM (
@@ -176,17 +150,17 @@
             .fetch_optional(self.storage.conn())
             .await
             .map(|option_row| option_row.map(|row| row.bytecode))
-        })
+        }
     }
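`get_provisional_l1_batch_number_of_miniblock_unchecked` above collapses its two subqueries with an `Option` chain. The fallback order in isolation:

// Prefer the batch the miniblock already belongs to; otherwise assume it will
// land in the next batch to be sealed (MAX(number) + 1); for an empty
// `l1_batches` table, default to batch 0.
fn provisional_batch(block_batch: Option<i64>, next_batch: Option<i64>) -> i64 {
    block_batch.or(next_batch).unwrap_or(0)
}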
     /// This method doesn't check if block with number equals to `block_number`
     /// is present in the database. For such blocks `None` will be returned.
-    pub fn get_factory_dep_unchecked(
+    pub async fn get_factory_dep_unchecked(
         &mut self,
         hash: H256,
-        block_number: zksync_types::MiniblockNumber,
+        block_number: MiniblockNumber,
     ) -> Result<Option<Vec<u8>>, SqlxError> {
-        async_std::task::block_on(async {
+        {
             sqlx::query!(
                 "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1 AND miniblock_number <= $2",
                 &hash.0.to_vec(),
@@ -195,6 +169,6 @@
             .fetch_optional(self.storage.conn())
             .await
             .map(|option_row| option_row.map(|row| row.bytecode))
-        })
+        }
     }
 }
diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs
new file mode 100644
index 000000000000..b34ca121a584
--- /dev/null
+++ b/core/lib/dal/src/sync_dal.rs
@@ -0,0 +1,80 @@
+use std::time::Instant;
+
+use crate::models::storage_sync::StorageSyncBlock;
+use crate::models::storage_transaction::StorageTransaction;
+use crate::SqlxError;
+use crate::StorageProcessor;
+use zksync_types::api::en::SyncBlock;
+use zksync_types::MiniblockNumber;
+use zksync_types::{Address, Transaction};
+
+/// DAL subset dedicated to the EN synchronization.
+#[derive(Debug)]
+pub struct SyncDal<'a, 'c> {
+    pub storage: &'a mut StorageProcessor<'c>,
+}
+
+impl SyncDal<'_, '_> {
+    pub async fn sync_block(
+        &mut self,
+        block_number: MiniblockNumber,
+        current_operator_address: Address,
+        include_transactions: bool,
+    ) -> Result<Option<SyncBlock>, SqlxError> {
+        let started_at = Instant::now();
+        let storage_block_details: Option<StorageSyncBlock> = sqlx::query_as!(
+            StorageSyncBlock,
+            r#"
+            SELECT miniblocks.number,
+                COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as "l1_batch_number!",
+                (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as "last_batch_miniblock?",
+                miniblocks.timestamp,
+                miniblocks.hash as "root_hash?",
+                commit_tx.tx_hash as "commit_tx_hash?",
+                commit_tx.confirmed_at as "committed_at?",
+                prove_tx.tx_hash as "prove_tx_hash?",
+                prove_tx.confirmed_at as "proven_at?",
+                execute_tx.tx_hash as "execute_tx_hash?",
+                execute_tx.confirmed_at as "executed_at?",
+                miniblocks.l1_gas_price,
+                miniblocks.l2_fair_gas_price,
+                miniblocks.bootloader_code_hash,
+                miniblocks.default_aa_code_hash,
+                l1_batches.fee_account_address as "fee_account_address?"
+            FROM miniblocks
+            LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number
+            LEFT JOIN eth_txs_history as commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL)
+            LEFT JOIN eth_txs_history as prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL)
+            LEFT JOIN eth_txs_history as execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL)
+            WHERE miniblocks.number = $1
+            "#,
+            block_number.0 as i64
+        )
+        .fetch_optional(self.storage.conn())
+        .await?;
+
+        let res = if let Some(storage_block_details) = storage_block_details {
+            let transactions = if include_transactions {
+                let block_transactions = sqlx::query_as!(
+                    StorageTransaction,
+                    r#"SELECT * FROM transactions WHERE miniblock_number = $1 ORDER BY index_in_block"#,
+                    block_number.0 as i64
+                )
+                .fetch_all(self.storage.conn())
+                .await?
+                .into_iter()
+                .map(Transaction::from)
+                .collect();
+                Some(block_transactions)
+            } else {
+                None
+            };
+            Some(storage_block_details.into_sync_block(current_operator_address, transactions))
+        } else {
+            None
+        };
+
+        metrics::histogram!("dal.request", started_at.elapsed(), "method" => "sync_dal_sync_block");
+        Ok(res)
+    }
+}
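A hypothetical consumer of the new `SyncDal` on the external-node side (only `sync_block` and its signature come from this patch; the wrapper function is illustrative):

async fn fetch_block_for_sync(
    storage: &mut StorageProcessor<'_>,
    number: MiniblockNumber,
    operator: Address,
) -> Result<Option<SyncBlock>, SqlxError> {
    // `true` requests the block's transactions along with the header data.
    SyncDal { storage }.sync_block(number, operator, true).await
}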
diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs
index 08dda6980954..d57ddc7cb12e 100644
--- a/core/lib/dal/src/tests/mod.rs
+++ b/core/lib/dal/src/tests/mod.rs
@@ -2,18 +2,19 @@ use std::fs;
 use std::time::Duration;

 use db_test_macro::db_test;
-use zksync_types::block::{L1BatchHeader, MiniblockHeader};
-use zksync_types::proofs::AggregationRound;
-use zksync_types::MAX_GAS_PER_PUBDATA_BYTE;
+use zksync_contracts::BaseSystemContractsHashes;
 use zksync_types::{
+    block::{L1BatchHeader, MiniblockHeader},
     fee::{Fee, TransactionExecutionMetrics},
     helpers::unix_timestamp_ms,
     l1::{L1Tx, OpProcessingType, PriorityQueueType},
     l2::L2Tx,
-    tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult},
+    proofs::AggregationRound,
+    tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult},
     Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber,
-    PriorityOpId, H160, H256, U256,
+    PriorityOpId, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, U256,
 };
+use zksync_utils::miniblock_hash;

 use crate::blocks_dal::BlocksDal;
 use crate::connection::ConnectionPool;
@@ -23,13 +24,27 @@ use crate::transactions_dal::TransactionsDal;
 use crate::transactions_web3_dal::TransactionsWeb3Dal;
 use crate::witness_generator_dal::WitnessGeneratorDal;

+const DEFAULT_GAS_PER_PUBDATA: u32 = 100;
+
 fn mock_tx_execution_metrics() -> TransactionExecutionMetrics {
     TransactionExecutionMetrics::default()
 }

-const DEFAULT_GAS_PER_PUBDATA: u32 = 100;
+pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader {
+    MiniblockHeader {
+        number: MiniblockNumber(number),
+        timestamp: 0,
+        hash: miniblock_hash(MiniblockNumber(number)),
+        l1_tx_count: 0,
+        l2_tx_count: 0,
+        base_fee_per_gas: 100,
+        l1_gas_price: 100,
+        l2_fair_gas_price: 100,
+        base_system_contracts_hashes: BaseSystemContractsHashes::default(),
+    }
+}

-fn mock_l2_transaction() -> L2Tx {
+pub(crate) fn mock_l2_transaction() -> L2Tx {
     let fee = Fee {
         gas_limit: U256::from(1_000_000u32),
         max_fee_per_gas: U256::from(250_000_000u32),
@@ -87,17 +102,35 @@ fn mock_l1_execute() -> L1Tx {
     }
 }

+pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionResult {
+    TransactionExecutionResult {
+        hash: transaction.hash(),
+        transaction: transaction.into(),
+        execution_info: ExecutionMetrics::default(),
+        execution_status: TxExecutionStatus::Success,
+        refunded_gas: 0,
+        operator_suggested_refund: 0,
+        compressed_bytecodes: vec![],
+        call_traces: vec![],
+        revert_reason: None,
+    }
+}
+
 #[db_test(dal_crate)]
 async fn workflow_with_submit_tx_equal_hashes(connection_pool: ConnectionPool) {
     let storage = &mut connection_pool.access_test_storage().await;
     let mut transactions_dal = TransactionsDal { storage };

     let tx = mock_l2_transaction();
-    let result = transactions_dal.insert_transaction_l2(tx.clone(), mock_tx_execution_metrics());
+    let result = transactions_dal
+        .insert_transaction_l2(tx.clone(), mock_tx_execution_metrics())
+        .await;

     assert_eq!(result, L2TxSubmissionResult::Added);

-    let result = transactions_dal.insert_transaction_l2(tx, mock_tx_execution_metrics());
+    let result = transactions_dal
+        .insert_transaction_l2(tx, mock_tx_execution_metrics())
+        .await;

     assert_eq!(result, L2TxSubmissionResult::Replaced);
 }
@@ -112,14 +145,18 @@ async fn workflow_with_submit_tx_diff_hashes(connection_pool: ConnectionPool) {
     let nonce = tx.common_data.nonce;
     let initiator_address = tx.common_data.initiator_address;

-    let result = transactions_dal.insert_transaction_l2(tx, mock_tx_execution_metrics());
+    let result = transactions_dal
+        .insert_transaction_l2(tx, mock_tx_execution_metrics())
+        .await;

     assert_eq!(result, L2TxSubmissionResult::Added);

     let mut tx = mock_l2_transaction();
     tx.common_data.nonce = nonce;
     tx.common_data.initiator_address = initiator_address;
-    let result = transactions_dal.insert_transaction_l2(tx, mock_tx_execution_metrics());
+    let result = transactions_dal
+        .insert_transaction_l2(tx, mock_tx_execution_metrics())
+        .await;

     assert_eq!(result, L2TxSubmissionResult::Replaced);
 }
@@ -132,67 +169,70 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) {
     // Stuck tx
     let mut tx = mock_l2_transaction();
     tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64;
-    transactions_dal.insert_transaction_l2(tx, mock_tx_execution_metrics());
+    transactions_dal
+        .insert_transaction_l2(tx, mock_tx_execution_metrics())
+        .await;
     // Tx in mempool
     let tx = mock_l2_transaction();
-    transactions_dal.insert_transaction_l2(tx, mock_tx_execution_metrics());
+    transactions_dal
+        .insert_transaction_l2(tx, mock_tx_execution_metrics())
+        .await;

     // Stuck L1 tx. We should never ever remove L1 tx
     let mut tx = mock_l1_execute();
     tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64;
-    transactions_dal.insert_transaction_l1(tx, L1BlockNumber(1));
+    transactions_dal
+        .insert_transaction_l1(tx, L1BlockNumber(1))
+        .await;

     // Old executed tx
     let mut executed_tx = mock_l2_transaction();
     executed_tx.received_timestamp_ms =
         unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64;
-    transactions_dal.insert_transaction_l2(executed_tx.clone(), mock_tx_execution_metrics());
+    transactions_dal
+        .insert_transaction_l2(executed_tx.clone(), mock_tx_execution_metrics())
+        .await;

     // Get all txs
-    transactions_dal.reset_mempool();
-    let txs = transactions_dal.sync_mempool(vec![], vec![], 0, 0, 1000).0;
+    transactions_dal.reset_mempool().await;
+    let txs = transactions_dal
+        .sync_mempool(vec![], vec![], 0, 0, 1000)
+        .await
+        .0;
     assert_eq!(txs.len(), 4);

     let storage = transactions_dal.storage;
-    BlocksDal { storage }.insert_miniblock(MiniblockHeader {
-        number: MiniblockNumber(1),
-        timestamp: 0,
-        hash: Default::default(),
-        l1_tx_count: 0,
-        l2_tx_count: 0,
-        base_fee_per_gas: Default::default(),
-        l1_gas_price: 0,
-        l2_fair_gas_price: 0,
-        base_system_contracts_hashes: Default::default(),
-    });
+    BlocksDal { storage }
+        .insert_miniblock(&create_miniblock_header(1))
+        .await;
     let mut transactions_dal = TransactionsDal { storage };

-    transactions_dal.mark_txs_as_executed_in_miniblock(
-        MiniblockNumber(1),
-        &[TransactionExecutionResult {
-            transaction: executed_tx.clone().into(),
-            hash: executed_tx.hash(),
-            execution_info: Default::default(),
-            execution_status: TxExecutionStatus::Success,
-            refunded_gas: 0,
-            operator_suggested_refund: 0,
-            compressed_bytecodes: vec![],
-            call_traces: vec![],
-            revert_reason: None,
-        }],
-        U256::from(1),
-    );
+    transactions_dal
+        .mark_txs_as_executed_in_miniblock(
+            MiniblockNumber(1),
+            &[mock_execution_result(executed_tx.clone())],
+            U256::from(1),
+        )
+        .await;

     // Get all txs
-    transactions_dal.reset_mempool();
-    let txs = transactions_dal.sync_mempool(vec![], vec![], 0, 0, 1000).0;
+    transactions_dal.reset_mempool().await;
+    let txs = transactions_dal
+        .sync_mempool(vec![], vec![], 0, 0, 1000)
+        .await
+        .0;
     assert_eq!(txs.len(), 3);

     // Remove one stuck tx
-    let removed_txs = transactions_dal.remove_stuck_txs(Duration::from_secs(500));
+    let removed_txs = transactions_dal
+        .remove_stuck_txs(Duration::from_secs(500))
+        .await;
     assert_eq!(removed_txs, 1);

-    transactions_dal.reset_mempool();
-    let txs = transactions_dal.sync_mempool(vec![], vec![], 0, 0, 1000).0;
+    transactions_dal.reset_mempool().await;
+    let txs = transactions_dal
+        .sync_mempool(vec![], vec![], 0, 0, 1000)
+        .await
+        .0;
     assert_eq!(txs.len(), 2);

     // We shouldn't collect executed tx
@@ -200,6 +240,7 @@ async fn remove_stuck_txs(connection_pool: ConnectionPool) {
     let mut transactions_web3_dal = TransactionsWeb3Dal { storage };
     transactions_web3_dal
         .get_transaction_receipt(executed_tx.hash())
+        .await
         .unwrap()
         .unwrap();
 }
@@ -231,23 +272,28 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) {
     );
     storage
         .blocks_dal()
-        .insert_l1_batch(header, Default::default());
+        .insert_l1_batch(&header, Default::default())
+        .await;
     let mut prover_dal = ProverDal { storage };
     let circuits = create_circuits();
     let l1_batch_number = L1BatchNumber(block_number);
-    prover_dal.insert_prover_jobs(
-        l1_batch_number,
-        circuits.clone(),
-        AggregationRound::BasicCircuits,
-    );
+    prover_dal
+        .insert_prover_jobs(
+            l1_batch_number,
+            circuits.clone(),
+            AggregationRound::BasicCircuits,
+        )
+        .await;

     // try inserting the same jobs again to ensure it does not panic
-    prover_dal.insert_prover_jobs(
-        l1_batch_number,
-        circuits.clone(),
-        AggregationRound::BasicCircuits,
-    );
+    prover_dal
+        .insert_prover_jobs(
+            l1_batch_number,
+            circuits.clone(),
+            AggregationRound::BasicCircuits,
+        )
+        .await;

     let prover_jobs_params = GetProverJobsParams {
         statuses: None,
@@ -259,7 +305,7 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) {
         desc: false,
         round: None,
     };
-    let jobs = prover_dal.get_jobs(prover_jobs_params).unwrap();
+    let jobs = prover_dal.get_jobs(prover_jobs_params).await.unwrap();
     assert_eq!(circuits.len(), jobs.len());
 }

@@ -275,26 +321,31 @@ async fn test_requeue_prover_jobs(connection_pool: ConnectionPool) {
     );
     storage
         .blocks_dal()
-        .insert_l1_batch(header, Default::default());
+        .insert_l1_batch(&header, Default::default())
+        .await;
     let mut prover_dal = ProverDal { storage };
     let circuits = create_circuits();
     let l1_batch_number = L1BatchNumber(block_number);
-    prover_dal.insert_prover_jobs(l1_batch_number, circuits, AggregationRound::BasicCircuits);
+    prover_dal
+        .insert_prover_jobs(l1_batch_number, circuits, AggregationRound::BasicCircuits)
+        .await;

     // take all jobs from prover_job table
     for _ in 1..=4 {
-        let job = prover_dal.get_next_prover_job();
+        let job = prover_dal.get_next_prover_job().await;
         assert!(job.is_some());
     }
-    let job = prover_dal.get_next_prover_job();
+    let job = prover_dal.get_next_prover_job().await;
     assert!(job.is_none());

     // re-queue jobs
-    let stuck_jobs = prover_dal.requeue_stuck_jobs(Duration::from_secs(0), 10);
+    let stuck_jobs = prover_dal
+        .requeue_stuck_jobs(Duration::from_secs(0), 10)
+        .await;
     assert_eq!(4, stuck_jobs.len());

     // re-check that all jobs can be taken again
     for _ in 1..=4 {
-        let job = prover_dal.get_next_prover_job();
+        let job = prover_dal.get_next_prover_job().await;
         assert!(job.is_some());
     }
 }
@@ -311,45 +362,52 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool:
     );
     storage
         .blocks_dal()
-        .insert_l1_batch(header, Default::default());
+        .insert_l1_batch(&header, Default::default())
+        .await;
     let mut prover_dal = ProverDal { storage };
     let circuits = create_circuits();
     let l1_batch_number = L1BatchNumber(block_number);
-    prover_dal.insert_prover_jobs(
-        l1_batch_number,
-        circuits.clone(),
-        AggregationRound::BasicCircuits,
-    );
+    prover_dal
+        .insert_prover_jobs(
+            l1_batch_number,
+            circuits.clone(),
+            AggregationRound::BasicCircuits,
+        )
+        .await;
     let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number);
-    let jobs = prover_dal.get_jobs(prover_jobs_params);
+    let jobs = prover_dal.get_jobs(prover_jobs_params).await;
     let job_ids: Vec<u32> = jobs.unwrap().into_iter().map(|job| job.id).collect();

     let proof = get_sample_proof();

     // mark all basic circuit proofs as successful.
-    job_ids.iter().for_each(|&id| {
-        prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test")
-    });
+    for id in job_ids.iter() {
+        prover_dal
+            .save_proof(*id, Duration::from_secs(0), proof.clone(), "unit-test")
+            .await;
+    }
     let mut witness_generator_dal = WitnessGeneratorDal { storage };

-    witness_generator_dal.create_aggregation_jobs(
-        l1_batch_number,
-        "basic_circuits_1.bin",
-        "basic_circuits_inputs_1.bin",
-        circuits.len(),
-        "scheduler_witness_1.bin",
-    );
+    witness_generator_dal
+        .create_aggregation_jobs(
+            l1_batch_number,
+            "basic_circuits_1.bin",
+            "basic_circuits_inputs_1.bin",
+            circuits.len(),
+            "scheduler_witness_1.bin",
+        )
+        .await;

     // move the leaf aggregation job to be queued
-    witness_generator_dal.move_leaf_aggregation_jobs_from_waiting_to_queued();
+    witness_generator_dal
+        .move_leaf_aggregation_jobs_from_waiting_to_queued()
+        .await;

     // Ensure get-next job gives the leaf aggregation witness job
-    let job = witness_generator_dal.get_next_leaf_aggregation_witness_job(
-        Duration::from_secs(0),
-        10,
-        u32::MAX,
-    );
+    let job = witness_generator_dal
+        .get_next_leaf_aggregation_witness_job(Duration::from_secs(0), 10, u32::MAX)
+        .await;
     assert_eq!(l1_batch_number, job.unwrap().block_number);
 }

@@ -365,50 +423,59 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool:
     );
     storage
         .blocks_dal()
-        .insert_l1_batch(header, Default::default());
+        .insert_l1_batch(&header, Default::default())
+        .await;
     let mut prover_dal = ProverDal { storage };
     let circuits = create_circuits();
     let l1_batch_number = L1BatchNumber(block_number);
-    prover_dal.insert_prover_jobs(
-        l1_batch_number,
-        circuits.clone(),
-        AggregationRound::LeafAggregation,
-    );
+    prover_dal
+        .insert_prover_jobs(
+            l1_batch_number,
+            circuits.clone(),
+            AggregationRound::LeafAggregation,
+        )
+        .await;
     let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number);
-    let jobs = prover_dal.get_jobs(prover_jobs_params);
+    let jobs = prover_dal.get_jobs(prover_jobs_params).await;
     let job_ids: Vec<u32> = jobs.unwrap().into_iter().map(|job| job.id).collect();

     let proof = get_sample_proof();
     // mark all leaf aggregation circuit proofs as successful.
-    job_ids.iter().for_each(|&id| {
-        prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test")
-    });
+    for id in job_ids {
+        prover_dal
+            .save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test")
+            .await;
+    }
     let mut witness_generator_dal = WitnessGeneratorDal { storage };

-    witness_generator_dal.create_aggregation_jobs(
-        l1_batch_number,
-        "basic_circuits_1.bin",
-        "basic_circuits_inputs_1.bin",
-        circuits.len(),
-        "scheduler_witness_1.bin",
-    );
-    witness_generator_dal.save_leaf_aggregation_artifacts(
-        l1_batch_number,
-        circuits.len(),
-        "leaf_layer_subqueues_1.bin",
-        "aggregation_outputs_1.bin",
-    );
+    witness_generator_dal
+        .create_aggregation_jobs(
+            l1_batch_number,
+            "basic_circuits_1.bin",
+            "basic_circuits_inputs_1.bin",
+            circuits.len(),
+            "scheduler_witness_1.bin",
+        )
+        .await;
+    witness_generator_dal
+        .save_leaf_aggregation_artifacts(
+            l1_batch_number,
+            circuits.len(),
+            "leaf_layer_subqueues_1.bin",
+            "aggregation_outputs_1.bin",
+        )
+        .await;

     // move the leaf aggregation job to be queued
-    witness_generator_dal.move_node_aggregation_jobs_from_waiting_to_queued();
+    witness_generator_dal
+        .move_node_aggregation_jobs_from_waiting_to_queued()
+        .await;

     // Ensure get-next job gives the node aggregation witness job
-    let job = witness_generator_dal.get_next_node_aggregation_witness_job(
-        Duration::from_secs(0),
-        10,
-        u32::MAX,
-    );
+    let job = witness_generator_dal
+        .get_next_node_aggregation_witness_job(Duration::from_secs(0), 10, u32::MAX)
+        .await;
     assert_eq!(l1_batch_number, job.unwrap().block_number);
 }

@@ -424,7 +491,8 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec
     );
     storage
         .blocks_dal()
-        .insert_l1_batch(header, Default::default());
+        .insert_l1_batch(&header, Default::default())
+        .await;
     let mut prover_dal = ProverDal { storage };

     let circuits = vec![(
@@ -432,38 +500,48 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec
         "1_0_Node aggregation_NodeAggregation.bin".to_owned(),
     )];
     let l1_batch_number = L1BatchNumber(block_number);
-    prover_dal.insert_prover_jobs(
-        l1_batch_number,
-        circuits.clone(),
-        AggregationRound::NodeAggregation,
-    );
+    prover_dal
+        .insert_prover_jobs(
+            l1_batch_number,
+            circuits.clone(),
+            AggregationRound::NodeAggregation,
+        )
+        .await;
     let prover_jobs_params = get_default_prover_jobs_params(l1_batch_number);
-    let jobs = prover_dal.get_jobs(prover_jobs_params);
+    let jobs = prover_dal.get_jobs(prover_jobs_params).await;
     let job_ids: Vec<u32> = jobs.unwrap().into_iter().map(|job| job.id).collect();

     let proof = get_sample_proof();
     // mark node aggregation circuit proofs as successful.
- job_ids.iter().for_each(|&id| { - prover_dal.save_proof(id, Duration::from_secs(0), proof.clone(), "unit-test") - }); + for id in &job_ids { + prover_dal + .save_proof(*id, Duration::from_secs(0), proof.clone(), "unit-test") + .await; + } let mut witness_generator_dal = WitnessGeneratorDal { storage }; - witness_generator_dal.create_aggregation_jobs( - l1_batch_number, - "basic_circuits_1.bin", - "basic_circuits_inputs_1.bin", - circuits.len(), - "scheduler_witness_1.bin", - ); witness_generator_dal - .save_node_aggregation_artifacts(l1_batch_number, "final_node_aggregations_1.bin"); + .create_aggregation_jobs( + l1_batch_number, + "basic_circuits_1.bin", + "basic_circuits_inputs_1.bin", + circuits.len(), + "scheduler_witness_1.bin", + ) + .await; + witness_generator_dal + .save_node_aggregation_artifacts(l1_batch_number, "final_node_aggregations_1.bin") + .await; // move the leaf aggregation job to be queued - witness_generator_dal.move_scheduler_jobs_from_waiting_to_queued(); + witness_generator_dal + .move_scheduler_jobs_from_waiting_to_queued() + .await; // Ensure get-next job gives the scheduler witness job - let job = - witness_generator_dal.get_next_scheduler_witness_job(Duration::from_secs(0), 10, u32::MAX); + let job = witness_generator_dal + .get_next_scheduler_witness_job(Duration::from_secs(0), 10, u32::MAX) + .await; assert_eq!(l1_batch_number, job.unwrap().block_number); } diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index 9f3628a3de9b..8b32b7140733 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -14,12 +14,12 @@ pub(crate) const STORED_USD_PRICE_PRECISION: usize = 6; #[derive(Debug)] pub struct TokensDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl TokensDal<'_, '_> { - pub fn add_tokens(&mut self, tokens: Vec<Token>) { - async_std::task::block_on(async { + pub async fn add_tokens(&mut self, tokens: Vec<Token>) { + { let mut copy = self .storage .conn() @@ -53,11 +53,15 @@ impl TokensDal<'_, '_> { } copy.send(bytes).await.unwrap(); copy.finish().await.unwrap(); - }) + } } - pub fn update_well_known_l1_token(&mut self, l1_address: &Address, metadata: TokenMetadata) { - async_std::task::block_on(async { + pub async fn update_well_known_l1_token( + &mut self, + l1_address: &Address, + metadata: TokenMetadata, + ) { + { sqlx::query!( "UPDATE tokens SET token_list_name = $2, token_list_symbol = $3, token_list_decimals = $4, well_known = true, updated_at = now() @@ -71,11 +75,11 @@ impl TokensDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_well_known_token_addresses(&mut self) -> Vec<(Address, Address)> { - async_std::task::block_on(async { + pub async fn get_well_known_token_addresses(&mut self) -> Vec<(Address, Address)> { + { let records = sqlx::query!("SELECT l1_address, l2_address FROM tokens WHERE well_known = true") .fetch_all(self.storage.conn()) .await @@ -91,11 +95,25 @@ impl TokensDal<'_, '_> { }) .collect(); addresses - }) + } } - pub fn get_unknown_l1_token_addresses(&mut self) -> Vec<Address>
{ - async_std::task::block_on(async { + pub async fn get_all_l2_token_addresses(&mut self) -> Vec<Address>
{ + { + let records = sqlx::query!("SELECT l2_address FROM tokens") + .fetch_all(self.storage.conn()) + .await + .unwrap(); + let addresses: Vec<Address>
= records + .into_iter() + .map(|record| Address::from_slice(&record.l2_address)) + .collect(); + addresses + } + } + + pub async fn get_unknown_l1_token_addresses(&mut self) -> Vec<Address>
{ + { let records = sqlx::query!("SELECT l1_address FROM tokens WHERE well_known = false") .fetch_all(self.storage.conn()) .await @@ -105,11 +123,11 @@ impl TokensDal<'_, '_> { .map(|record| Address::from_slice(&record.l1_address)) .collect(); addresses - }) + } } - pub fn get_l1_tokens_by_volume(&mut self, min_volume: &Ratio<BigUint>) -> Vec<Address>
{ - async_std::task::block_on(async { + pub async fn get_l1_tokens_by_volume(&mut self, min_volume: &Ratio<BigUint>) -> Vec<Address>
{ + { let min_volume = ratio_to_big_decimal(min_volume, STORED_USD_PRICE_PRECISION); let records = sqlx::query!( "SELECT l1_address FROM tokens WHERE market_volume > $1", @@ -123,11 +141,11 @@ impl TokensDal<'_, '_> { .map(|record| Address::from_slice(&record.l1_address)) .collect(); addresses - }) + } } - pub fn set_l1_token_price(&mut self, l1_address: &Address, price: TokenPrice) { - async_std::task::block_on(async { + pub async fn set_l1_token_price(&mut self, l1_address: &Address, price: TokenPrice) { + { sqlx::query!( "UPDATE tokens SET usd_price = $2, usd_price_updated_at = $3, updated_at = now() WHERE l1_address = $1", l1_address.as_bytes(), @@ -137,15 +155,15 @@ impl TokensDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn set_l1_token_market_volume( + pub async fn set_l1_token_market_volume( &mut self, l1_address: &Address, market_volume: TokenMarketVolume, ) { - async_std::task::block_on(async { + { sqlx::query!( "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1", l1_address.as_bytes(), @@ -155,11 +173,14 @@ impl TokensDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_token_market_volume(&mut self, l2_address: &Address) -> Option<TokenMarketVolume> { - async_std::task::block_on(async { + pub async fn get_token_market_volume( + &mut self, + l2_address: &Address, + ) -> Option<TokenMarketVolume> { + { let storage_market_volume = sqlx::query_as!( StorageTokenMarketVolume, "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1", @@ -169,11 +190,11 @@ impl TokensDal<'_, '_> { .await .unwrap(); storage_market_volume.and_then(Into::into) - }) + } } - pub fn rollback_tokens(&mut self, block_number: MiniblockNumber) { - async_std::task::block_on(async { + pub async fn rollback_tokens(&mut self, block_number: MiniblockNumber) { + { sqlx::query!( " DELETE FROM tokens @@ -196,6 +217,6 @@ impl TokensDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } } diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index d828c0684e02..58e9a7ec9a66 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -14,12 +14,12 @@ pub(crate) const STORED_USD_PRICE_PRECISION: usize = 6; #[derive(Debug)] pub struct TokensWeb3Dal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl TokensWeb3Dal<'_, '_> { - pub fn get_well_known_tokens(&mut self) -> Result<Vec<Token>, SqlxError> { - async_std::task::block_on(async { + pub async fn get_well_known_tokens(&mut self) -> Result<Vec<Token>, SqlxError> { + { let records = sqlx::query!( "SELECT l1_address, l2_address, name, symbol, decimals FROM tokens WHERE well_known = true @@ -40,17 +40,17 @@ impl TokensWeb3Dal<'_, '_> { }) .collect(); Ok(result) - }) + } } - pub fn is_token_actively_trading( + pub async fn is_token_actively_trading( &mut self, l2_token: &Address, min_volume: &Ratio<BigUint>, max_acceptable_volume_age_in_secs: u32, max_acceptable_price_age_in_secs: u32, ) -> Result<bool, SqlxError> { - async_std::task::block_on(async { + { let min_volume = ratio_to_big_decimal(min_volume, STORED_USD_PRICE_PRECISION); let volume_pg_interval = PgInterval { months: 0, @@ -79,14 +79,14 @@ impl TokensWeb3Dal<'_, '_> { .unwrap() .count; Ok(count == 1) - }) + } } - pub fn get_token_price( + pub async fn get_token_price( &mut self, l2_address: &Address, ) -> Result<Option<TokenPrice>, SqlxError> { - async_std::task::block_on(async { + { let storage_price = sqlx::query_as!(
StorageTokenPrice, "SELECT usd_price, usd_price_updated_at FROM tokens WHERE l2_address = $1", @@ -96,14 +96,14 @@ impl TokensWeb3Dal<'_, '_> { .await?; Ok(storage_price.and_then(Into::into)) - }) + } } - pub fn get_token_metadata( + pub async fn get_token_metadata( &mut self, l2_address: &Address, ) -> Result<Option<TokenMetadata>, SqlxError> { - async_std::task::block_on(async { + { let storage_token_metadata = sqlx::query_as!( StorageTokenMetadata, r#" @@ -119,6 +119,6 @@ impl TokensWeb3Dal<'_, '_> { .await?; Ok(storage_token_metadata.map(Into::into)) - }) + } } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index b9179156bae4..d729c0682b4e 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -31,21 +31,23 @@ pub enum L2TxSubmissionResult { Duplicate, Proxied, } + impl fmt::Display for L2TxSubmissionResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self) } } +#[derive(Debug)] pub struct TransactionsDal<'c, 'a> { - pub storage: &'c mut StorageProcessor<'a>, + pub(crate) storage: &'c mut StorageProcessor<'a>, } type TxLocations = Vec<(MiniblockNumber, Vec<(H256, u32, u16)>)>; impl TransactionsDal<'_, '_> { - pub fn insert_transaction_l1(&mut self, tx: L1Tx, l1_block_number: L1BlockNumber) { - async_std::task::block_on(async { + pub async fn insert_transaction_l1(&mut self, tx: L1Tx, l1_block_number: L1BlockNumber) { + { let contract_address = tx.execute.contract_address.as_bytes().to_vec(); let tx_hash = tx.hash().0.to_vec(); let json_data = serde_json::to_value(&tx.execute) @@ -127,15 +129,15 @@ impl TransactionsDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn insert_transaction_l2( + pub async fn insert_transaction_l2( &mut self, tx: L2Tx, exec_info: TransactionExecutionMetrics, ) -> L2TxSubmissionResult { - async_std::task::block_on(async { + { let contract_address = tx.execute.contract_address.as_bytes().to_vec(); let tx_hash = tx.hash().0.to_vec(); let json_data = serde_json::to_value(&tx.execute) @@ -286,15 +288,15 @@ impl TransactionsDal<'_, '_> { ); l2_tx_insertion_result - }) + } } - pub fn mark_txs_as_executed_in_l1_batch( + pub async fn mark_txs_as_executed_in_l1_batch( &mut self, block_number: L1BatchNumber, transactions: &[TransactionExecutionResult], ) { - async_std::task::block_on(async { + { let hashes: Vec<Vec<u8>> = transactions .iter() .map(|tx| tx.hash.as_bytes().to_vec()) @@ -321,22 +323,23 @@ impl TransactionsDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn mark_txs_as_executed_in_miniblock( + pub async fn mark_txs_as_executed_in_miniblock( &mut self, miniblock_number: MiniblockNumber, transactions: &[TransactionExecutionResult], block_base_fee_per_gas: U256, ) { - async_std::task::block_on(async { + { let mut transaction = self.storage.start_transaction().await; let mut l1_hashes = Vec::with_capacity(transactions.len()); let mut l1_indices_in_block = Vec::with_capacity(transactions.len()); let mut l1_errors = Vec::with_capacity(transactions.len()); let mut l1_execution_infos = Vec::with_capacity(transactions.len()); let mut l1_refunded_gas = Vec::with_capacity(transactions.len()); + let mut l1_effective_gas_prices = Vec::with_capacity(transactions.len()); let mut l2_hashes = Vec::with_capacity(transactions.len()); let mut l2_values = Vec::with_capacity(transactions.len()); @@ -394,12 +397,14 @@ impl TransactionsDal<'_, '_> { } match &transaction.common_data { - ExecuteTransactionCommon::L1(_) => { +
ExecuteTransactionCommon::L1(common_data) => { l1_hashes.push(hash.0.to_vec()); l1_indices_in_block.push(index_in_block as i32); l1_errors.push(error.unwrap_or_default()); l1_execution_infos.push(serde_json::to_value(execution_info).unwrap()); l1_refunded_gas.push(*refunded_gas as i64); + l1_effective_gas_prices + .push(u256_to_big_decimal(common_data.max_fee_per_gas)); } ExecuteTransactionCommon::L2(common_data) => { let data = serde_json::to_value(&transaction.execute).unwrap(); @@ -510,7 +515,7 @@ impl TransactionsDal<'_, '_> { &l2_errors, &l2_effective_gas_prices, &l2_execution_infos, - &l2_inputs, + &l2_inputs as &[&[u8]], &l2_datas, &l2_refunded_gas, &l2_values, @@ -536,6 +541,7 @@ impl TransactionsDal<'_, '_> { in_mempool=FALSE, execution_info = execution_info || data_table.new_execution_info, refunded_gas = data_table.refunded_gas, + effective_gas_price = data_table.effective_gas_price, updated_at = now() FROM ( @@ -544,7 +550,8 @@ UNNEST($3::integer[]) AS index_in_block, UNNEST($4::varchar[]) AS error, UNNEST($5::jsonb[]) AS new_execution_info, - UNNEST($6::bigint[]) as refunded_gas + UNNEST($6::bigint[]) as refunded_gas, + UNNEST($7::numeric[]) as effective_gas_price ) AS data_table WHERE transactions.hash = data_table.hash "#, @@ -554,6 +561,7 @@ &l1_errors, &l1_execution_infos, &l1_refunded_gas, + &l1_effective_gas_prices, ) .execute(transaction.conn()) .await @@ -578,11 +586,11 @@ impl TransactionsDal<'_, '_> { metrics::histogram!("dal.transactions.insert_call_tracer", started_at.elapsed()); } transaction.commit().await; - }) + } } - pub fn mark_tx_as_rejected(&mut self, transaction_hash: H256, error: &str) { - async_std::task::block_on(async { + pub async fn mark_tx_as_rejected(&mut self, transaction_hash: H256, error: &str) { + { // If the rejected tx has been replaced, it means that this tx hash does not exist in the database // and we will update nothing. // These txs don't affect the state, so we can just easily skip this update. @@ -596,11 +604,11 @@ .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn reset_transactions_state(&mut self, miniblock_number: MiniblockNumber) { - async_std::task::block_on(async { + pub async fn reset_transactions_state(&mut self, miniblock_number: MiniblockNumber) { + { let tx_hashes = sqlx::query!( "UPDATE transactions SET l1_batch_number = NULL, miniblock_number = NULL, error = NULL, index_in_block = NULL, execution_info = '{}' @@ -623,11 +631,11 @@ .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn remove_stuck_txs(&mut self, stuck_tx_timeout: Duration) -> usize { - async_std::task::block_on(async { + pub async fn remove_stuck_txs(&mut self, stuck_tx_timeout: Duration) -> usize { + { let stuck_tx_timeout = pg_interval_from_duration(stuck_tx_timeout); sqlx::query!( "DELETE FROM transactions \ @@ -640,13 +648,13 @@ .await .unwrap() .len() - }) + } } /// Fetches new updates for the mempool. /// Returns new transactions and the current nonces for the related accounts; /// the latter are only used to bootstrap the mempool for a given account. - pub fn sync_mempool( + pub async fn sync_mempool( &mut self, stashed_accounts: Vec<Address>
, purged_accounts: Vec<Address>
, @@ -654,7 +662,7 @@ fee_per_gas: u64, limit: usize, ) -> (Vec<Transaction>, HashMap<Address, Nonce>) { - async_std::task::block_on(async { + { let stashed_addresses: Vec<_> = stashed_accounts.into_iter().map(|a| a.0.to_vec()).collect(); sqlx::query!( @@ -689,6 +697,7 @@ AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3)) ORDER BY is_priority DESC, priority_op_id, received_at LIMIT $1 + FOR UPDATE ) as subquery WHERE transactions.hash = subquery.hash RETURNING transactions.*", @@ -730,20 +739,20 @@ transactions.into_iter().map(|tx| tx.into()).collect(), nonces, ) - }) + } } - pub fn reset_mempool(&mut self) { - async_std::task::block_on(async { + pub async fn reset_mempool(&mut self) { + { sqlx::query!("UPDATE transactions SET in_mempool = FALSE WHERE in_mempool = TRUE") .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_last_processed_l1_block(&mut self) -> Option<L1BlockNumber> { - async_std::task::block_on(async { + pub async fn get_last_processed_l1_block(&mut self) -> Option<L1BlockNumber> { + { sqlx::query!( "SELECT l1_block_number FROM transactions WHERE priority_op_id IS NOT NULL @@ -754,11 +763,11 @@ .await .unwrap() .and_then(|x| x.l1_block_number.map(|block| L1BlockNumber(block as u32))) - }) + } } - pub fn last_priority_id(&mut self) -> Option<PriorityOpId> { - async_std::task::block_on(async { + pub async fn last_priority_id(&mut self) -> Option<PriorityOpId> { + { let op_id = sqlx::query!( r#"SELECT MAX(priority_op_id) as "op_id" from transactions where is_priority = true"# ) @@ -767,11 +776,11 @@ .unwrap()? .op_id?; Some(PriorityOpId(op_id as u64)) - }) + } } - pub fn next_priority_id(&mut self) -> PriorityOpId { - async_std::task::block_on(async { + pub async fn next_priority_id(&mut self) -> PriorityOpId { + { sqlx::query!( r#"SELECT MAX(priority_op_id) as "op_id" from transactions where is_priority = true AND miniblock_number IS NOT NULL"# ) @@ -781,11 +790,11 @@ .and_then(|row| row.op_id) .map(|value| PriorityOpId((value + 1) as u64)) .unwrap_or_default() - }) + } } - pub fn insert_trace(&mut self, hash: H256, trace: VmExecutionTrace) { - async_std::task::block_on(async { + pub async fn insert_trace(&mut self, hash: H256, trace: VmExecutionTrace) { + { sqlx::query!( "INSERT INTO transaction_traces (tx_hash, trace, created_at, updated_at) VALUES ($1, $2, now(), now())", hash.as_bytes(), @@ -794,11 +803,11 @@ .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_trace(&mut self, hash: H256) -> Option<VmExecutionTrace> { - async_std::task::block_on(async { + pub async fn get_trace(&mut self, hash: H256) -> Option<VmExecutionTrace> { + { let trace = sqlx::query!( "SELECT trace FROM transaction_traces WHERE tx_hash = $1", hash.as_bytes() ) @@ -811,15 +820,17 @@ serde_json::from_value(trace) .unwrap_or_else(|_| panic!("invalid trace json in database for {:?}", hash)) }) - }) + } } // Returns transactions that state_keeper needs to re-execute on restart. // That is, the transactions that were included in some miniblock // but not yet included in an L1 batch. The order of the transactions is the same as it was // during the previous execution.
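A hedged usage sketch of the restart flow the comment above describes (not part of the patch): `ConnectionPool::access_storage` is assumed by analogy with the `access_test_storage` helper used in the tests further down, and `replay_miniblock` is a hypothetical callback standing in for the state keeper.

    // Sketch only: drain the now-async `get_transactions_to_reexecute` on restart.
    async fn replay_unsealed_miniblocks(
        pool: &ConnectionPool,
        mut replay_miniblock: impl FnMut(MiniblockNumber, Vec<zksync_types::Transaction>),
    ) {
        let mut storage = pool.access_storage().await; // assumed accessor
        // Miniblocks that were persisted but never sealed into an L1 batch,
        // returned in their original execution order.
        let pending = storage
            .transactions_dal()
            .get_transactions_to_reexecute()
            .await;
        for (miniblock_number, txs) in pending {
            replay_miniblock(miniblock_number, txs);
        }
    }
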
- pub fn get_transactions_to_reexecute(&mut self) -> Vec<(MiniblockNumber, Vec<Transaction>)> { - async_std::task::block_on(async { + pub async fn get_transactions_to_reexecute( + &mut self, + ) -> Vec<(MiniblockNumber, Vec<Transaction>)> { + { sqlx::query_as!( StorageTransaction, " @@ -842,11 +853,11 @@ ) }) .collect() - }) + } } - pub fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations { - async_std::task::block_on(async { + pub async fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations { + { sqlx::query!( r#" SELECT miniblock_number as "miniblock_number!", @@ -871,11 +882,11 @@ ) }) .collect() - }) + } } - pub fn get_call_trace(&mut self, tx_hash: H256) -> Option<Call> { - async_std::task::block_on(async { + pub async fn get_call_trace(&mut self, tx_hash: H256) -> Option<Call> { + { sqlx::query_as!( CallTrace, r#" @@ -888,6 +899,46 @@ .await .unwrap() .map(|trace| trace.into()) - }) + } + } + + pub async fn migrate_l1_txs_effective_gas_price_pre_m6( + &mut self, + from_block: u32, + to_block: u32, + ) { + sqlx::query!( + " + UPDATE transactions + SET effective_gas_price = 0 + WHERE miniblock_number BETWEEN $1 AND $2 + AND is_priority = TRUE + ", + from_block as i32, + to_block as i32, + ) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn migrate_l1_txs_effective_gas_price_post_m6( + &mut self, + from_block: u32, + to_block: u32, + ) { + sqlx::query!( + " + UPDATE transactions + SET effective_gas_price = max_fee_per_gas + WHERE miniblock_number BETWEEN $1 AND $2 + AND is_priority = TRUE + ", + from_block as i32, + to_block as i32, + ) + .execute(self.storage.conn()) + .await + .unwrap(); } } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index f2222e292b44..8d214f2d4bb8 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,37 +1,33 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_types::{ - api::{ - BlockId, BlockNumber, L2ToL1Log, Log, Transaction, TransactionDetails, TransactionId, - TransactionReceipt, - }, - Address, L2ChainId, MiniblockNumber, ACCOUNT_CODE_STORAGE_ADDRESS, + api, Address, L2ChainId, MiniblockNumber, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, U256, U64, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; use crate::models::{ storage_block::{bind_block_where_sql_params, web3_block_where_sql}, - storage_event::{StorageL2ToL1Log, StorageWeb3Log}, + storage_event::StorageWeb3Log, storage_transaction::{ extract_web3_transaction, web3_transaction_select_sql, StorageTransaction, StorageTransactionDetails, }, }; -use crate::SqlxError; -use crate::StorageProcessor; +use crate::{SqlxError, StorageProcessor}; +#[derive(Debug)] pub struct TransactionsWeb3Dal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl TransactionsWeb3Dal<'_, '_> { - pub fn get_transaction_receipt( + pub async fn get_transaction_receipt( &mut self, hash: H256, - ) -> Result<Option<TransactionReceipt>, SqlxError> { - async_std::task::block_on(async { - let receipt: Option<TransactionReceipt> = sqlx::query!( + ) -> Result<Option<api::TransactionReceipt>, SqlxError> { + { + let receipt = sqlx::query!( r#" WITH sl AS ( SELECT * FROM storage_logs @@ -78,7 +74,7 @@ impl TransactionsWeb3Dal<'_, '_> { let tx_type = db_row.tx_format.map(U64::from).unwrap_or_default(); let transaction_index =
db_row.index_in_block.map(U64::from).unwrap_or_default(); - TransactionReceipt { + api::TransactionReceipt { transaction_hash: H256::from_slice(&db_row.tx_hash), transaction_index, block_hash: db_row @@ -127,7 +123,7 @@ impl TransactionsWeb3Dal<'_, '_> { }); match receipt { Some(mut receipt) => { - let logs: Vec<Log> = sqlx::query_as!( + let logs: Vec<_> = sqlx::query_as!( StorageWeb3Log, r#" SELECT @@ -144,101 +140,82 @@ .fetch_all(self.storage.conn()) .await? .into_iter() - .map(|storage_log: StorageWeb3Log| { - let mut log = Log::from(storage_log); + .map(|storage_log| { + let mut log = api::Log::from(storage_log); log.block_hash = receipt.block_hash; log.l1_batch_number = receipt.l1_batch_number; log }) .collect(); + receipt.logs = logs; - let l2_to_l1_logs: Vec<L2ToL1Log> = sqlx::query_as!( - StorageL2ToL1Log, - r#" - SELECT - miniblock_number, log_index_in_miniblock, log_index_in_tx, tx_hash, - Null::bytea as "block_hash", Null::bigint as "l1_batch_number?", - shard_id, is_service, tx_index_in_miniblock, tx_index_in_l1_batch, sender, key, value - FROM l2_to_l1_logs - WHERE tx_hash = $1 - ORDER BY log_index_in_tx ASC - "#, - hash.as_bytes() - ) - .fetch_all(self.storage.conn()) - .await? - .into_iter() - .map(|storage_l2_to_l1_log: StorageL2ToL1Log| { - let mut l2_to_l1_log = L2ToL1Log::from(storage_l2_to_l1_log); - l2_to_l1_log.block_hash = receipt.block_hash; - l2_to_l1_log.l1_batch_number = receipt.l1_batch_number; - l2_to_l1_log - }) - .collect(); + let l2_to_l1_logs = self.storage.events_dal().l2_to_l1_logs(hash).await?; + let l2_to_l1_logs: Vec<_> = l2_to_l1_logs + .into_iter() + .map(|storage_l2_to_l1_log| { + let mut l2_to_l1_log = api::L2ToL1Log::from(storage_l2_to_l1_log); + l2_to_l1_log.block_hash = receipt.block_hash; + l2_to_l1_log.l1_batch_number = receipt.l1_batch_number; + l2_to_l1_log + }) + .collect(); receipt.l2_to_l1_logs = l2_to_l1_logs; Ok(Some(receipt)) } None => Ok(None), } - }) + } } - pub fn get_transaction( + pub async fn get_transaction( &mut self, - transaction_id: TransactionId, + transaction_id: api::TransactionId, chain_id: L2ChainId, - ) -> Result<Option<Transaction>, SqlxError> { - async_std::task::block_on(async { - let where_sql = match transaction_id { - TransactionId::Hash(_) => "transactions.hash = $1".to_owned(), - TransactionId::Block(block_id, _) => { - format!( - "transactions.index_in_block = $1 AND {}", - web3_block_where_sql(block_id, 2) - ) - } - }; - let query = format!( - r#" - SELECT - {} - FROM transactions - LEFT JOIN miniblocks - ON miniblocks.number = transactions.miniblock_number - WHERE {} - "#, - web3_transaction_select_sql(), - where_sql - ); - let query = sqlx::query(&query); - - let query = match transaction_id { - TransactionId::Hash(tx_hash) => query.bind(tx_hash.0.to_vec()), - TransactionId::Block(block_id, tx_index) => { - let tx_index = if tx_index.as_u64() > i32::MAX as u64 { - return Ok(None); - } else { - tx_index.as_u64() as i32 - }; - bind_block_where_sql_params(block_id, query.bind(tx_index)) - } - }; + ) -> Result<Option<api::Transaction>, SqlxError> { + let where_sql = match transaction_id { + api::TransactionId::Hash(_) => "transactions.hash = $1".to_owned(), + api::TransactionId::Block(block_id, _) => { + format!( + "transactions.index_in_block = $1 AND {}", + web3_block_where_sql(block_id, 2) + ) + } + }; + let query = format!( + "SELECT {} + FROM transactions + LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + WHERE {where_sql}", + web3_transaction_select_sql() + ); + let query = sqlx::query(&query); - let tx
= query - .fetch_optional(self.storage.conn()) - .await? - .map(|row| extract_web3_transaction(row, chain_id)); - Ok(tx) - }) + let query = match &transaction_id { + api::TransactionId::Hash(tx_hash) => query.bind(tx_hash.as_bytes()), + api::TransactionId::Block(block_id, tx_index) => { + let tx_index = if tx_index.as_u64() > i32::MAX as u64 { + return Ok(None); + } else { + tx_index.as_u64() as i32 + }; + bind_block_where_sql_params(block_id, query.bind(tx_index)) + } + }; + + let tx = query + .fetch_optional(self.storage.conn()) + .await? + .map(|row| extract_web3_transaction(row, chain_id)); + Ok(tx) } - pub fn get_transaction_details( + pub async fn get_transaction_details( &mut self, hash: H256, - ) -> Result<Option<TransactionDetails>, SqlxError> { - async_std::task::block_on(async { + ) -> Result<Option<api::TransactionDetails>, SqlxError> { + { let storage_tx_details: Option<StorageTransactionDetails> = sqlx::query_as!( StorageTransactionDetails, r#" @@ -264,112 +241,215 @@ impl TransactionsWeb3Dal<'_, '_> { let tx = storage_tx_details.map(|tx_details| tx_details.into()); Ok(tx) - }) + } } /// Returns hashes of txs which were received after `from_timestamp` and the time of receiving the last tx. - pub fn get_pending_txs_hashes_after( + pub async fn get_pending_txs_hashes_after( &mut self, from_timestamp: NaiveDateTime, limit: Option<usize>, ) -> Result<(Vec<H256>, Option<NaiveDateTime>), SqlxError> { - async_std::task::block_on(async { - let records = sqlx::query!( - " - SELECT transactions.hash, transactions.received_at - FROM transactions - LEFT JOIN miniblocks ON miniblocks.number = miniblock_number - WHERE received_at > $1 - ORDER BY received_at ASC - LIMIT $2 - ", - from_timestamp, - limit.map(|l| l as i64) - ) - .fetch_all(self.storage.conn()) - .await?; - let last_loc = records.last().map(|record| record.received_at); - let hashes = records - .into_iter() - .map(|record| H256::from_slice(&record.hash)) - .collect(); - Ok((hashes, last_loc)) - }) + let records = sqlx::query!( + "SELECT transactions.hash, transactions.received_at \ + FROM transactions \ + LEFT JOIN miniblocks ON miniblocks.number = miniblock_number \ + WHERE received_at > $1 \ + ORDER BY received_at ASC \ + LIMIT $2", + from_timestamp, + limit.map(|limit| limit as i64) + ) + .fetch_all(self.storage.conn()) + .await?; + + let last_loc = records.last().map(|record| record.received_at); + let hashes = records + .into_iter() + .map(|record| H256::from_slice(&record.hash)) + .collect(); + Ok((hashes, last_loc)) } - pub fn next_nonce_by_initiator_account( + pub async fn next_nonce_by_initiator_account( &mut self, initiator_address: Address, ) -> Result<U256, SqlxError> { - async_std::task::block_on(async { - let latest_nonce = self - .storage - .storage_web3_dal() - .get_address_historical_nonce( - initiator_address, - BlockId::Number(BlockNumber::Latest), - )? - .expect("Failed to get `latest` nonce") - .as_u64(); - - // Get nonces of non-rejected transactions, starting from the 'latest' nonce. - // `latest` nonce is used, because it is guaranteed that there are no gaps before it. - // `(miniblock_number IS NOT NULL OR error IS NULL)` is the condition that filters non-rejected transactions. - // Query is fast because we have an index on (`initiator_address`, `nonce`) - // and it cannot return more than `max_nonce_ahead` nonces. - let non_rejected_nonces: Vec<u64> = sqlx::query!( - r#" - SELECT nonce as "nonce!"
FROM transactions - WHERE initiator_address = $1 AND nonce >= $2 - AND is_priority = FALSE - AND (miniblock_number IS NOT NULL OR error IS NULL) - ORDER BY nonce - "#, - initiator_address.0.to_vec(), - latest_nonce as i64 - ) - .fetch_all(self.storage.conn()) + let latest_block_number = self + .storage + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest)) .await? - .into_iter() - .map(|row| row.nonce as u64) - .collect(); + .expect("Failed to get `latest` nonce"); + let latest_nonce = self + .storage + .storage_web3_dal() + .get_address_historical_nonce(initiator_address, latest_block_number) + .await? + .as_u64(); - // Find pending nonce as the first "gap" in nonces. - let mut pending_nonce = latest_nonce; - for nonce in non_rejected_nonces { - if pending_nonce == nonce { - pending_nonce += 1; - } else { - break; - } + // Get nonces of non-rejected transactions, starting from the 'latest' nonce. + // `latest` nonce is used, because it is guaranteed that there are no gaps before it. + // `(miniblock_number IS NOT NULL OR error IS NULL)` is the condition that filters non-rejected transactions. + // Query is fast because we have an index on (`initiator_address`, `nonce`) + // and it cannot return more than `max_nonce_ahead` nonces. + let non_rejected_nonces: Vec<u64> = sqlx::query!( + "SELECT nonce as \"nonce!\" FROM transactions \ + WHERE initiator_address = $1 AND nonce >= $2 \ + AND is_priority = FALSE \ + AND (miniblock_number IS NOT NULL OR error IS NULL) \ + ORDER BY nonce", + initiator_address.as_bytes(), + latest_nonce as i64 + ) + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|row| row.nonce as u64) + .collect(); + + // Find pending nonce as the first "gap" in nonces. + let mut pending_nonce = latest_nonce; + for nonce in non_rejected_nonces { + if pending_nonce == nonce { + pending_nonce += 1; + } else { + break; } } - Ok(U256::from(pending_nonce)) - }) + Ok(U256::from(pending_nonce)) } /// Returns the server transactions (not API ones) from a certain miniblock. /// Returns an empty list if the miniblock doesn't exist. - pub fn get_raw_miniblock_transactions( + pub async fn get_raw_miniblock_transactions( &mut self, miniblock: MiniblockNumber, - ) -> Result<Vec<zksync_types::Transaction>, SqlxError> { - async_std::task::block_on(async { - let txs = sqlx::query_as!( - StorageTransaction, - " - SELECT * FROM transactions - WHERE miniblock_number = $1 - ORDER BY index_in_block - ", - miniblock.0 as i64 - ) - .fetch_all(self.storage.conn()) - .await?
- .into_iter() - .map(zksync_types::Transaction::from) - .collect(); - Ok(txs) - }) + ) -> Result, SqlxError> { + let rows = sqlx::query_as!( + StorageTransaction, + "SELECT * FROM transactions \ + WHERE miniblock_number = $1 \ + ORDER BY index_in_block", + miniblock.0 as i64 + ) + .fetch_all(self.storage.conn()) + .await?; + + Ok(rows.into_iter().map(Into::into).collect()) + } +} + +#[cfg(test)] +mod tests { + use db_test_macro::db_test; + use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx}; + use zksync_utils::miniblock_hash; + + use super::*; + use crate::{ + tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + ConnectionPool, + }; + + async fn prepare_transaction(conn: &mut StorageProcessor<'_>, tx: L2Tx) { + conn.blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await; + conn.transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + conn.blocks_dal() + .insert_miniblock(&create_miniblock_header(0)) + .await; + let mut miniblock_header = create_miniblock_header(1); + miniblock_header.l2_tx_count = 1; + conn.blocks_dal().insert_miniblock(&miniblock_header).await; + + let tx_results = [mock_execution_result(tx)]; + conn.transactions_dal() + .mark_txs_as_executed_in_miniblock(MiniblockNumber(1), &tx_results, U256::from(1)) + .await; + } + + #[db_test(dal_crate)] + async fn getting_transaction(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + let tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + prepare_transaction(&mut conn, tx).await; + + let block_ids = [ + api::BlockId::Number(api::BlockNumber::Latest), + api::BlockId::Number(api::BlockNumber::Number(1.into())), + api::BlockId::Hash(miniblock_hash(MiniblockNumber(1))), + ]; + let transaction_ids = block_ids + .iter() + .map(|&block_id| api::TransactionId::Block(block_id, 0.into())) + .chain([api::TransactionId::Hash(tx_hash)]); + + for transaction_id in transaction_ids { + let web3_tx = conn + .transactions_web3_dal() + .get_transaction(transaction_id, L2ChainId(270)) + .await; + let web3_tx = web3_tx.unwrap().unwrap(); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.block_number, Some(1.into())); + assert_eq!(web3_tx.transaction_index, Some(0.into())); + } + + let transactions_with_bogus_index = block_ids + .iter() + .map(|&block_id| api::TransactionId::Block(block_id, 1.into())); + for transaction_id in transactions_with_bogus_index { + let web3_tx = conn + .transactions_web3_dal() + .get_transaction(transaction_id, L2ChainId(270)) + .await; + assert!(web3_tx.unwrap().is_none()); + } + + let bogus_block_ids = [ + api::BlockId::Number(api::BlockNumber::Earliest), + api::BlockId::Number(api::BlockNumber::Pending), + api::BlockId::Number(api::BlockNumber::Number(42.into())), + api::BlockId::Hash(H256::zero()), + ]; + let transactions_with_bogus_block = bogus_block_ids + .iter() + .map(|&block_id| api::TransactionId::Block(block_id, 0.into())); + for transaction_id in transactions_with_bogus_block { + let web3_tx = conn + .transactions_web3_dal() + .get_transaction(transaction_id, L2ChainId(270)) + .await; + assert!(web3_tx.unwrap().is_none()); + } + } + + #[db_test(dal_crate)] + async fn getting_miniblock_transactions(connection_pool: ConnectionPool) { + let mut conn = connection_pool.access_test_storage().await; + let tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + prepare_transaction(&mut conn, tx).await; + + let raw_txs = conn + .transactions_web3_dal() + 
.get_raw_miniblock_transactions(MiniblockNumber(0)) + .await + .unwrap(); + assert!(raw_txs.is_empty()); + + let raw_txs = conn + .transactions_web3_dal() + .get_raw_miniblock_transactions(MiniblockNumber(1)) + .await + .unwrap(); + assert_eq!(raw_txs.len(), 1); + assert_eq!(raw_txs[0].hash(), tx_hash); } } diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index 51e059baa608..408bb7b4cf7b 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -21,20 +21,19 @@ use crate::StorageProcessor; #[derive(Debug)] pub struct WitnessGeneratorDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c>, } impl WitnessGeneratorDal<'_, '_> { - pub fn get_next_basic_circuit_witness_job( + pub async fn get_next_basic_circuit_witness_job( &mut self, processing_timeout: Duration, max_attempts: u32, last_l1_batch_to_process: u32, ) -> Option { - async_std::task::block_on(async { - let processing_timeout = pg_interval_from_duration(processing_timeout); - let result: Option = sqlx::query!( - " + let processing_timeout = pg_interval_from_duration(processing_timeout); + let result: Option = sqlx::query!( + " UPDATE witness_inputs SET status = 'in_progress', attempts = attempts + 1, updated_at = now(), processing_started_at = now() @@ -54,67 +53,66 @@ impl WitnessGeneratorDal<'_, '_> { ) RETURNING witness_inputs.* ", - &processing_timeout, - max_attempts as i32, - last_l1_batch_to_process as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| WitnessGeneratorJobMetadata { - block_number: L1BatchNumber(row.l1_batch_number as u32), - proofs: vec![], - }); + &processing_timeout, + max_attempts as i32, + last_l1_batch_to_process as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| WitnessGeneratorJobMetadata { + block_number: L1BatchNumber(row.l1_batch_number as u32), + proofs: vec![], + }); - result - }) + result } - pub fn get_witness_generated_l1_batches(&mut self) -> Vec<(L1BatchNumber, AggregationRound)> { - [ + pub async fn get_witness_generated_l1_batches( + &mut self, + ) -> Vec<(L1BatchNumber, AggregationRound)> { + let mut generated_batches = Vec::with_capacity(4); + for round in [ "node_aggregation_witness_jobs", "leaf_aggregation_witness_jobs", "scheduler_witness_jobs", "witness_inputs", - ] - .map(|round| { - async_std::task::block_on(async { - let record = sqlx::query(&format!( - "SELECT MAX(l1_batch_number) as l1_batch FROM {} WHERE status='successful'", - round - )) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - ( - L1BatchNumber( - record - .get::, &str>("l1_batch") - .unwrap_or_default() as u32, - ), - match round { - "node_aggregation_witness_jobs" => AggregationRound::NodeAggregation, - "leaf_aggregation_witness_jobs" => AggregationRound::LeafAggregation, - "scheduler_witness_jobs" => AggregationRound::Scheduler, - "witness_inputs" => AggregationRound::BasicCircuits, - _ => unreachable!(), - }, - ) - }) - }) - .to_vec() + ] { + let record = sqlx::query(&format!( + "SELECT MAX(l1_batch_number) as l1_batch FROM {} WHERE status='successful'", + round + )) + .fetch_one(self.storage.conn()) + .await + .unwrap(); + let generated_batch = ( + L1BatchNumber( + record + .get::, &str>("l1_batch") + .unwrap_or_default() as u32, + ), + match round { + "node_aggregation_witness_jobs" => AggregationRound::NodeAggregation, + "leaf_aggregation_witness_jobs" => AggregationRound::LeafAggregation, + 
"scheduler_witness_jobs" => AggregationRound::Scheduler, + "witness_inputs" => AggregationRound::BasicCircuits, + _ => unreachable!(), + }, + ); + generated_batches.push(generated_batch); + } + generated_batches } - pub fn get_next_leaf_aggregation_witness_job( + pub async fn get_next_leaf_aggregation_witness_job( &mut self, processing_timeout: Duration, max_attempts: u32, last_l1_batch_to_process: u32, ) -> Option { - async_std::task::block_on(async { - let processing_timeout = pg_interval_from_duration(processing_timeout); - sqlx::query!( - " + let processing_timeout = pg_interval_from_duration(processing_timeout); + let record = sqlx::query!( + " UPDATE leaf_aggregation_witness_jobs SET status = 'in_progress', attempts = attempts + 1, updated_at = now(), processing_started_at = now() @@ -133,26 +131,29 @@ impl WitnessGeneratorDal<'_, '_> { SKIP LOCKED ) RETURNING leaf_aggregation_witness_jobs.* - ", &processing_timeout, - max_attempts as i32, - last_l1_batch_to_process as i64 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| { - let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); - let number_of_basic_circuits = row.number_of_basic_circuits; - - // Now that we have a job in `queued` status, we need to enrich it with the computed proofs. - // We select `aggregation_round = 0` to only get basic circuits. - // Note that at this point there cannot be any other circuits anyway, - // but we keep the check for explicitness - let basic_circuits_proofs: Vec< - Proof>>, - > = self.load_proofs_for_block(l1_batch_number, AggregationRound::BasicCircuits); - - assert_eq!( + ", + &processing_timeout, + max_attempts as i32, + last_l1_batch_to_process as i64 + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + if let Some(row) = record { + let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); + let number_of_basic_circuits = row.number_of_basic_circuits; + + // Now that we have a job in `queued` status, we need to enrich it with the computed proofs. + // We select `aggregation_round = 0` to only get basic circuits. 
+ // Note that at this point there cannot be any other circuits anyway, + // but we keep the check for explicitness + let basic_circuits_proofs: Vec< + Proof>>, + > = self + .load_proofs_for_block(l1_batch_number, AggregationRound::BasicCircuits) + .await; + + assert_eq!( basic_circuits_proofs.len(), number_of_basic_circuits as usize, "leaf_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed basic proofs, which is different from expected {}", @@ -161,23 +162,24 @@ impl WitnessGeneratorDal<'_, '_> { number_of_basic_circuits ); - WitnessGeneratorJobMetadata { - block_number: l1_batch_number, - proofs: basic_circuits_proofs, - } - }) - }) + Some(WitnessGeneratorJobMetadata { + block_number: l1_batch_number, + proofs: basic_circuits_proofs, + }) + } else { + None + } } - pub fn get_next_node_aggregation_witness_job( + pub async fn get_next_node_aggregation_witness_job( &mut self, processing_timeout: Duration, max_attempts: u32, last_l1_batch_to_process: u32, ) -> Option { - async_std::task::block_on(async { + { let processing_timeout = pg_interval_from_duration(processing_timeout); - sqlx::query!( + let record = sqlx::query!( " UPDATE node_aggregation_witness_jobs SET status = 'in_progress', attempts = attempts + 1, @@ -197,24 +199,27 @@ impl WitnessGeneratorDal<'_, '_> { SKIP LOCKED ) RETURNING node_aggregation_witness_jobs.* - ", &processing_timeout, + ", + &processing_timeout, max_attempts as i32, last_l1_batch_to_process as i64, ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| { - let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); - let number_of_leaf_circuits = row.number_of_leaf_circuits.expect("number_of_leaf_circuits is not found in a `queued` `node_aggregation_witness_jobs` job"); - - // Now that we have a job in `queued` status, we need to enrich it with the computed proofs. - // We select `aggregation_round = 1` to only get leaf aggregation circuits - let leaf_circuits_proofs: Vec< - Proof>>, - > = self.load_proofs_for_block(l1_batch_number, AggregationRound::LeafAggregation); - - assert_eq!( + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + if let Some(row) = record { + let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); + let number_of_leaf_circuits = row.number_of_leaf_circuits.expect("number_of_leaf_circuits is not found in a `queued` `node_aggregation_witness_jobs` job"); + + // Now that we have a job in `queued` status, we need to enrich it with the computed proofs. 
+ // We select `aggregation_round = 1` to only get leaf aggregation circuits + let leaf_circuits_proofs: Vec< + Proof>>, + > = self + .load_proofs_for_block(l1_batch_number, AggregationRound::LeafAggregation) + .await; + + assert_eq!( leaf_circuits_proofs.len(), number_of_leaf_circuits as usize, "node_aggregation_witness_job for l1 batch {} is in status `queued`, but there are only {} computed leaf proofs, which is different from expected {}", @@ -222,23 +227,25 @@ impl WitnessGeneratorDal<'_, '_> { leaf_circuits_proofs.len(), number_of_leaf_circuits ); - WitnessGeneratorJobMetadata { - block_number: l1_batch_number, - proofs: leaf_circuits_proofs, - } + Some(WitnessGeneratorJobMetadata { + block_number: l1_batch_number, + proofs: leaf_circuits_proofs, }) - }) + } else { + None + } + } } - pub fn get_next_scheduler_witness_job( + pub async fn get_next_scheduler_witness_job( &mut self, processing_timeout: Duration, max_attempts: u32, last_l1_batch_to_process: u32, ) -> Option { - async_std::task::block_on(async { + { let processing_timeout = pg_interval_from_duration(processing_timeout); - sqlx::query!( + let record = sqlx::query!( " UPDATE scheduler_witness_jobs SET status = 'in_progress', attempts = attempts + 1, @@ -258,22 +265,25 @@ impl WitnessGeneratorDal<'_, '_> { SKIP LOCKED ) RETURNING scheduler_witness_jobs.* - ", &processing_timeout, + ", + &processing_timeout, max_attempts as i32, last_l1_batch_to_process as i64 ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| { - let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); - // Now that we have a job in `queued` status, we need to enrich it with the computed proof. - // We select `aggregation_round = 2` to only get node aggregation circuits - let leaf_circuits_proofs: Vec< - Proof>>, - > = self.load_proofs_for_block(l1_batch_number, AggregationRound::NodeAggregation); - - assert_eq!( + .fetch_optional(self.storage.conn()) + .await + .unwrap(); + if let Some(row) = record { + let l1_batch_number = L1BatchNumber(row.l1_batch_number as u32); + // Now that we have a job in `queued` status, we need to enrich it with the computed proof. + // We select `aggregation_round = 2` to only get node aggregation circuits + let leaf_circuits_proofs: Vec< + Proof>>, + > = self + .load_proofs_for_block(l1_batch_number, AggregationRound::NodeAggregation) + .await; + + assert_eq!( leaf_circuits_proofs.len(), 1usize, "scheduler_job for l1 batch {} is in status `queued`, but there is {} computed node proofs. 
We expect exactly one node proof.", @@ -281,20 +291,22 @@ impl WitnessGeneratorDal<'_, '_> { leaf_circuits_proofs.len() ); - WitnessGeneratorJobMetadata { - block_number: l1_batch_number, - proofs: leaf_circuits_proofs, - } + Some(WitnessGeneratorJobMetadata { + block_number: l1_batch_number, + proofs: leaf_circuits_proofs, }) - }) + } else { + None + } + } } - fn load_proofs_for_block( + async fn load_proofs_for_block( &mut self, block_number: L1BatchNumber, aggregation_round: AggregationRound, ) -> Vec>>> { - async_std::task::block_on(async { + { sqlx::query!( " SELECT circuit_type, result from prover_jobs @@ -316,16 +328,16 @@ impl WitnessGeneratorDal<'_, '_> { .expect("cannot deserialize proof")) }) .collect::>>>>() - }) + } } - pub fn mark_witness_job_as_successful( + pub async fn mark_witness_job_as_successful( &mut self, block_number: L1BatchNumber, aggregation_round: AggregationRound, time_taken: Duration, ) { - async_std::task::block_on(async { + ({ let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( "UPDATE {} @@ -342,12 +354,12 @@ impl WitnessGeneratorDal<'_, '_> { } /// Is invoked by the prover when all the required proofs are computed - pub fn mark_witness_job_as_queued( + pub async fn mark_witness_job_as_queued( &mut self, block_number: L1BatchNumber, aggregation_round: AggregationRound, ) { - async_std::task::block_on(async { + ({ let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( "UPDATE {} @@ -362,12 +374,12 @@ impl WitnessGeneratorDal<'_, '_> { }); } - pub fn mark_witness_job_as_skipped( + pub async fn mark_witness_job_as_skipped( &mut self, block_number: L1BatchNumber, aggregation_round: AggregationRound, ) { - async_std::task::block_on(async { + ({ let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( "UPDATE {} @@ -378,80 +390,62 @@ impl WitnessGeneratorDal<'_, '_> { let mut query = sqlx::query(&sql); query = query.bind(block_number.0 as i64); - let mut transaction = self.storage.start_transaction().await; - query.execute(transaction.conn()).await.unwrap(); - - transaction - .blocks_dal() - .set_skip_proof_for_l1_batch(block_number); - transaction.commit().await; + query.execute(self.storage.conn()).await.unwrap(); }); } /// Is invoked by the Witness Generator when the previous aggregation round is complete - pub fn mark_witness_job_as_waiting_for_proofs( + pub async fn mark_witness_job_as_waiting_for_proofs( &mut self, block_number: L1BatchNumber, aggregation_round: AggregationRound, ) { - async_std::task::block_on(async { - let table_name = Self::input_table_name_for(aggregation_round); - let sql = format!( - "UPDATE {} + let table_name = Self::input_table_name_for(aggregation_round); + let sql = format!( + "UPDATE {} SET status = 'waiting_for_proofs', updated_at = now() WHERE l1_batch_number = $1", - table_name - ); - let mut query = sqlx::query(&sql); - query = query.bind(block_number.0 as i64); + table_name + ); + let mut query = sqlx::query(&sql); + query = query.bind(block_number.0 as i64); - query.execute(self.storage.conn()).await.unwrap(); - }); + query.execute(self.storage.conn()).await.unwrap(); } - pub fn mark_witness_job_as_failed( + pub async fn mark_witness_job_as_failed( &mut self, - block_number: L1BatchNumber, aggregation_round: AggregationRound, + l1_batch_number: L1BatchNumber, time_taken: Duration, error: String, - max_attempts: u32, - ) { - async_std::task::block_on(async { - let table_name = Self::input_table_name_for(aggregation_round); - let sql = 
format!( - "UPDATE {} + ) -> u32 { + let table_name = Self::input_table_name_for(aggregation_round); + let sql = format!( + "UPDATE {} SET status = 'failed', updated_at = now(), time_taken = $1, error = $2 WHERE l1_batch_number = $3 RETURNING attempts ", - table_name - ); - let mut query = sqlx::query(&sql); - query = query.bind(duration_to_naive_time(time_taken)); - query = query.bind(error); - query = query.bind(block_number.0 as i64); - - let mut transaction = self.storage.start_transaction().await; - let attempts = query - .fetch_one(transaction.conn()) - .await - .unwrap() - .get::("attempts"); - if attempts as u32 >= max_attempts { - transaction - .blocks_dal() - .set_skip_proof_for_l1_batch(block_number); - } - transaction.commit().await; - }) + table_name + ); + let mut query = sqlx::query(&sql); + query = query.bind(duration_to_naive_time(time_taken)); + query = query.bind(error); + query = query.bind(l1_batch_number.0 as i64); + // returns the number of attempts of the job + query + .fetch_one(self.storage.conn()) + .await + .unwrap() + .get::("attempts") as u32 } /// Creates a leaf_aggregation_job in `waiting_for_proofs` status, /// and also a node_aggregation_job and scheduler_job in `waiting_for_artifacts` status. /// The jobs will be advanced to `waiting_for_proofs` by the `Witness Generator` when the corresponding artifacts are computed, /// and to `queued` by the `Prover` when all the dependency proofs are computed - pub fn create_aggregation_jobs( + pub async fn create_aggregation_jobs( &mut self, block_number: L1BatchNumber, basic_circuits_blob_url: &str, @@ -459,7 +453,7 @@ impl WitnessGeneratorDal<'_, '_> { number_of_basic_circuits: usize, scheduler_witness_blob_url: &str, ) { - async_std::task::block_on(async { + { let started_at = Instant::now(); sqlx::query!( @@ -506,7 +500,7 @@ impl WitnessGeneratorDal<'_, '_> { .unwrap(); metrics::histogram!("dal.request", started_at.elapsed(), "method" => "create_aggregation_jobs"); - }) + } } /// Saves artifacts in node_aggregation_job @@ -514,14 +508,14 @@ impl WitnessGeneratorDal<'_, '_> { /// it will be advanced to `queued` by the prover when all the dependency proofs are computed. /// If the node aggregation job was already `queued` in case of connrecunt run of same leaf aggregation job /// we keep the status as is to prevent data race. - pub fn save_leaf_aggregation_artifacts( + pub async fn save_leaf_aggregation_artifacts( &mut self, block_number: L1BatchNumber, number_of_leaf_circuits: usize, leaf_layer_subqueues_blob_url: &str, aggregation_outputs_blob_url: &str, ) { - async_std::task::block_on(async { + { let started_at = Instant::now(); sqlx::query!( " @@ -547,19 +541,19 @@ impl WitnessGeneratorDal<'_, '_> { started_at.elapsed(), "method" => "save_leaf_aggregation_artifacts" ); - }) + } } /// Saves artifacts in `scheduler_artifacts_jobs` and advances it to `waiting_for_proofs` status. /// It will be advanced to `queued` by the prover when all the dependency proofs are computed. /// If the scheduler witness job was already queued the in case of concurrent run /// of same node aggregation job, we keep the status as is to prevent data race. 
- pub fn save_node_aggregation_artifacts( + pub async fn save_node_aggregation_artifacts( &mut self, block_number: L1BatchNumber, node_aggregations_blob_url: &str, ) { - async_std::task::block_on(async { + { let started_at = Instant::now(); sqlx::query!( " @@ -581,15 +575,15 @@ impl WitnessGeneratorDal<'_, '_> { started_at.elapsed(), "method" => "save_node_aggregation_artifacts", ); - }) + } } - pub fn save_final_aggregation_result( + pub async fn save_final_aggregation_result( &mut self, block_number: L1BatchNumber, aggregation_result_coords: [[u8; 32]; 4], ) { - async_std::task::block_on(async { + { let aggregation_result_coords_serialized = bincode::serialize(&aggregation_result_coords) .expect("cannot serialize aggregation_result_coords"); @@ -606,14 +600,14 @@ impl WitnessGeneratorDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_witness_jobs_stats( + pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, ) -> JobCountStatistics { - async_std::task::block_on(async { + { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" @@ -637,7 +631,7 @@ impl WitnessGeneratorDal<'_, '_> { failed: results.remove("failed").unwrap_or(0i64) as usize, successful: results.remove("successful").unwrap_or(0i64) as usize, } - }) + } } fn input_table_name_for(aggregation_round: AggregationRound) -> &'static str { @@ -649,7 +643,7 @@ impl WitnessGeneratorDal<'_, '_> { } } - pub fn get_jobs( + pub async fn get_jobs( &mut self, opts: GetWitnessJobsParams, ) -> Result<Vec<WitnessJobInfo>, sqlx::Error> { @@ -705,17 +699,16 @@ impl WitnessGeneratorDal<'_, '_> { let query = sqlx::query_as(&sql); - let x = - async_std::task::block_on(async move { query.fetch_all(self.storage.conn()).await }); - - Ok(x? + Ok(query + .fetch_all(self.storage.conn()) + .await?
.into_iter() .map(|x: StorageWitnessJobInfo| x.into()) .collect()) } - pub fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { - async_std::task::block_on(async { + pub async fn save_witness_inputs(&mut self, block_number: L1BatchNumber, object_key: &str) { + { sqlx::query!( "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, created_at, updated_at) \ VALUES ($1, $2, $3, 'queued', now(), now()) @@ -727,14 +720,14 @@ impl WitnessGeneratorDal<'_, '_> { .fetch_optional(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned( + pub async fn get_basic_circuit_and_circuit_inputs_blob_urls_to_be_cleaned( &mut self, limit: u8, ) -> Vec<(i64, (String, String))> { - async_std::task::block_on(async { + { let job_ids = sqlx::query!( r#" SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs @@ -761,14 +754,14 @@ impl WitnessGeneratorDal<'_, '_> { ) }) .collect() - }) + } } - pub fn get_leaf_layer_subqueues_and_aggregation_outputs_blob_urls_to_be_cleaned( + pub async fn get_leaf_layer_subqueues_and_aggregation_outputs_blob_urls_to_be_cleaned( &mut self, limit: u8, ) -> Vec<(i64, (String, String))> { - async_std::task::block_on(async { + { let job_ids = sqlx::query!( r#" SELECT l1_batch_number, leaf_layer_subqueues_blob_url, aggregation_outputs_blob_url FROM node_aggregation_witness_jobs @@ -795,14 +788,14 @@ impl WitnessGeneratorDal<'_, '_> { ) }) .collect() - }) + } } - pub fn get_scheduler_witness_and_node_aggregations_blob_urls_to_be_cleaned( + pub async fn get_scheduler_witness_and_node_aggregations_blob_urls_to_be_cleaned( &mut self, limit: u8, ) -> Vec<(i64, (String, String))> { - async_std::task::block_on(async { + { let job_ids = sqlx::query!( r#" SELECT l1_batch_number, scheduler_witness_blob_url, final_node_aggregations_blob_url FROM scheduler_witness_jobs @@ -829,11 +822,11 @@ impl WitnessGeneratorDal<'_, '_> { ) }) .collect() - }) + } } - pub fn mark_leaf_aggregation_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) { - async_std::task::block_on(async { + pub async fn mark_leaf_aggregation_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) { + { sqlx::query!( r#" UPDATE leaf_aggregation_witness_jobs @@ -845,11 +838,11 @@ impl WitnessGeneratorDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn mark_node_aggregation_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) { - async_std::task::block_on(async { + pub async fn mark_node_aggregation_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) { + { sqlx::query!( r#" UPDATE node_aggregation_witness_jobs @@ -861,11 +854,14 @@ impl WitnessGeneratorDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn mark_scheduler_witness_gcs_blobs_as_cleaned(&mut self, l1_batch_numbers: Vec<i64>) { - async_std::task::block_on(async { + pub async fn mark_scheduler_witness_gcs_blobs_as_cleaned( + &mut self, + l1_batch_numbers: Vec<i64>, + ) { + { sqlx::query!( r#" UPDATE scheduler_witness_jobs @@ -877,11 +873,11 @@ impl WitnessGeneratorDal<'_, '_> { .execute(self.storage.conn()) .await .unwrap(); - }) + } } - pub fn move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { - async_std::task::block_on(async { + pub async fn move_leaf_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { + { sqlx::query!( r#" UPDATE leaf_aggregation_witness_jobs @@ -904,11 +900,11 @@ impl
WitnessGeneratorDal<'_, '_> { .into_iter() .map(|row| row.l1_batch_number) .collect() - }) + } } - pub fn move_node_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { - async_std::task::block_on(async { + pub async fn move_node_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { + { sqlx::query!( r#" UPDATE node_aggregation_witness_jobs @@ -931,11 +927,11 @@ impl WitnessGeneratorDal<'_, '_> { .into_iter() .map(|row| row.l1_batch_number) .collect() - }) + } } - pub fn move_scheduler_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { - async_std::task::block_on(async { + pub async fn move_scheduler_jobs_from_waiting_to_queued(&mut self) -> Vec<i64> { + { // There is always just one final node circuit // hence we do AND p.number_of_jobs = 1 sqlx::query!( @@ -960,7 +956,7 @@ impl WitnessGeneratorDal<'_, '_> { .into_iter() .map(|row| row.l1_batch_number) .collect() - }) + } } } diff --git a/core/lib/db_storage_provider/Cargo.toml b/core/lib/db_storage_provider/Cargo.toml deleted file mode 100644 index 85f96b64334c..000000000000 --- a/core/lib/db_storage_provider/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "zksync_db_storage_provider" -version = "1.0.0" -edition = "2018" -authors = ["The Matter Labs Team <hello@matterlabs.dev>"] -homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" -keywords = ["blockchain", "zksync"] -categories = ["cryptography"] - -[dependencies] -zksync_types = { path = "../types", version = "1.0" } -zksync_dal = { path = "../dal", version = "1.0" } diff --git a/core/lib/db_storage_provider/src/lib.rs b/core/lib/db_storage_provider/src/lib.rs deleted file mode 100644 index 605a4f40f723..000000000000 --- a/core/lib/db_storage_provider/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -use zksync_dal::StorageProcessor; -use zksync_types::{MiniblockNumber, StorageKey, StorageValue, ZkSyncReadStorage, H256}; - -#[derive(Debug)] -pub struct DbStorageProvider<'a> { - connection: StorageProcessor<'a>, - block_number: MiniblockNumber, - consider_new_l1_batch: bool, -} - -impl<'a> DbStorageProvider<'a> { - pub fn new( - connection: StorageProcessor<'a>, - block_number: MiniblockNumber, - consider_new_l1_batch: bool, - ) -> DbStorageProvider<'a> { - DbStorageProvider { - connection, - block_number, - consider_new_l1_batch, - } - } -} - -impl<'a> ZkSyncReadStorage for DbStorageProvider<'a> { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { - self.connection - .storage_web3_dal() - .get_historical_value_unchecked(key, self.block_number) - .unwrap() - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.connection - .storage_web3_dal() - .is_write_initial(key, self.block_number, self.consider_new_l1_batch) - .unwrap() - } - - fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> { - self.connection - .storage_web3_dal() - .get_factory_dep_unchecked(hash, self.block_number) - .unwrap() - } -} diff --git a/core/lib/db_test_macro/src/lib.rs b/core/lib/db_test_macro/src/lib.rs index 3aefaecf9497..baeae318af2c 100644 --- a/core/lib/db_test_macro/src/lib.rs +++ b/core/lib/db_test_macro/src/lib.rs @@ -38,30 +38,51 @@ fn parse_knobs(mut input: syn::ItemFn, inside_dal_crate: bool) -> Result<TokenStream> […] authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" -license = "Apache-2.0" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/eth_client/src/clients/http/query.rs
b/core/lib/eth_client/src/clients/http/query.rs index 5dc860d20861..4e137f3da063 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -10,12 +10,13 @@ use zksync_types::web3::{ Contract, Options, }, ethabi, + helpers::CallFuture, transports::Http, types::{ - Address, BlockId, BlockNumber, Bytes, Filter, Log, Transaction, TransactionId, + Address, Block, BlockId, BlockNumber, Bytes, Filter, Log, Transaction, TransactionId, TransactionReceipt, H256, U256, U64, }, - Web3, + Transport, Web3, }; use crate::{ @@ -287,4 +288,21 @@ impl EthInterface for QueryClient { metrics::histogram!("eth_client.direct.logs", start.elapsed()); Ok(logs) } + + async fn block( + &self, + block_id: String, + component: &'static str, + ) -> Result<Option<Block<H256>>, Error> { + metrics::counter!("server.ethereum_gateway.call", 1, "component" => component, "method" => "block"); + let start = Instant::now(); + let block = CallFuture::new( + self.web3 + .transport() + .execute("eth_getBlockByNumber", vec![block_id.into(), false.into()]), + ) + .await?; + metrics::histogram!("eth_client.direct.block", start.elapsed()); + Ok(block) + } } diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index a16f165b8c2c..f014abd78655 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -3,10 +3,11 @@ use std::{fmt, time::Instant}; use async_trait::async_trait; -use zksync_config::ZkSyncConfig; +use zksync_config::{ContractsConfig, ETHClientConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; use zksync_eth_signer::PrivateKeySigner; use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; +use zksync_types::web3::types::Block; use zksync_types::web3::{ self, contract::{ @@ -33,16 +34,21 @@ use crate::{ pub type PKSigningClient = SigningClient<PrivateKeySigner>; impl PKSigningClient { - pub fn from_config(config: &ZkSyncConfig) -> Self { + pub fn from_config( + eth_sender: &ETHSenderConfig, + contracts_config: &ContractsConfig, + eth_client: &ETHClientConfig, + ) -> Self { // Gather required data from the config. // It's done explicitly to simplify getting rid of this function later.
- let main_node_url = &config.eth_client.web3_url; - let operator_private_key = config.eth_sender.sender.operator_private_key; - let operator_commit_eth_addr = config.eth_sender.sender.operator_commit_eth_addr; - let diamond_proxy_addr = config.contracts.diamond_proxy_addr; - let default_priority_fee_per_gas = - config.eth_sender.gas_adjuster.default_priority_fee_per_gas; - let l1_chain_id = config.eth_client.chain_id; + let main_node_url = &eth_client.web3_url; + let operator_private_key = eth_sender + .sender + .private_key() + .expect("Operator private key is required for signing client"); + let diamond_proxy_addr = contracts_config.diamond_proxy_addr; + let default_priority_fee_per_gas = eth_sender.gas_adjuster.default_priority_fee_per_gas; + let l1_chain_id = eth_client.chain_id; let transport = web3::transports::Http::new(main_node_url).expect("Failed to create transport"); @@ -54,7 +60,7 @@ impl PKSigningClient { SigningClient::new( transport, zksync_contract(), - operator_commit_eth_addr, + operator_address, PrivateKeySigner::new(operator_private_key), diamond_proxy_addr, default_priority_fee_per_gas.into(), @@ -208,6 +214,14 @@ impl<S: EthereumSigner> EthInterface for SigningClient<S> { async fn logs(&self, filter: Filter, component: &'static str) -> Result<Vec<Log>, Error> { self.query_client.logs(filter, component).await } + + async fn block( + &self, + block_id: String, + component: &'static str, + ) -> Result<Option<Block<H256>>, Error> { + self.query_client.block(block_id, component).await + } } #[async_trait] diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 62467088f39b..ca7b2d52dc6a 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -5,7 +5,7 @@ use jsonrpc_core::types::error::Error as RpcError; use std::collections::{BTreeMap, HashMap}; use std::sync::RwLock; use zksync_types::web3::contract::tokens::Detokenize; -use zksync_types::web3::types::{BlockId, Filter, Log, Transaction}; +use zksync_types::web3::types::{Block, BlockId, Filter, Log, Transaction}; use zksync_types::web3::{ contract::tokens::Tokenize, contract::Options, @@ -64,6 +64,9 @@ pub struct MockEthereum { pub current_nonce: AtomicU64, pub pending_nonce: AtomicU64, pub nonces: RwLock<BTreeMap<u64, u64>>, + /// If true, the mock will not check the nonce ordering of transactions. + /// This is useful for testing cases when transactions are executed out of order.
+ pub non_ordering_confirmations: bool, } impl Default for MockEthereum { @@ -78,6 +81,7 @@ impl Default for MockEthereum { current_nonce: Default::default(), pending_nonce: Default::default(), nonces: RwLock::new([(0, 0)].into()), + non_ordering_confirmations: false, } } } @@ -109,7 +113,14 @@ impl MockEthereum { let nonce = self.current_nonce.fetch_add(1, Ordering::SeqCst); let tx_nonce = self.sent_txs.read().unwrap()[&tx_hash].nonce; - anyhow::ensure!(tx_nonce == nonce, "nonce mismatch"); + if self.non_ordering_confirmations { + if tx_nonce >= nonce { + self.current_nonce.store(tx_nonce, Ordering::SeqCst); + } + } else { + anyhow::ensure!(tx_nonce == nonce, "nonce mismatch"); + } + self.nonces.write().unwrap().insert(block_number, nonce + 1); let status = ExecutedTxStatus { @@ -168,6 +179,13 @@ impl MockEthereum { ..self } } + + pub fn with_non_ordering_confirmation(self, non_ordering_confirmations: bool) -> Self { + Self { + non_ordering_confirmations, + ..self + } + } } #[async_trait] @@ -295,6 +313,14 @@ impl EthInterface for MockEthereum { async fn logs(&self, _filter: Filter, _component: &'static str) -> Result<Vec<Log>, Error> { unimplemented!("Not needed right now") } + + async fn block( + &self, + _block_id: String, + _component: &'static str, + ) -> Result<Option<Block<H256>>, Error> { + unimplemented!("Not needed right now") + } } #[async_trait::async_trait] @@ -360,7 +386,7 @@ impl BoundEthInterface for MockEthereum { } #[async_trait] -impl<T: AsRef<dyn EthInterface> + Sync> EthInterface for T { +impl<T: AsRef<dyn EthInterface> + Send + Sync> EthInterface for T { async fn nonce_at_for_account( &self, account: Address, @@ -469,6 +495,14 @@ impl<T: AsRef<dyn EthInterface> + Sync> EthInterface for T { async fn logs(&self, filter: Filter, component: &'static str) -> Result<Vec<Log>, Error> { self.as_ref().logs(filter, component).await } + + async fn block( + &self, + block_id: String, + component: &'static str, + ) -> Result<Option<Block<H256>>, Error> { + self.as_ref().block(block_id, component).await + } } #[async_trait::async_trait] diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index cd5888ba4e41..39288b644951 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -13,8 +13,8 @@ use zksync_types::{ }, ethabi, types::{ - Address, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, - H256, U256, U64, + Address, Block, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, + H160, H256, U256, U64, }, }, L1ChainId, @@ -37,7 +37,7 @@ use zksync_types::{ /// unnecessarily high amount of Web3 calls. Implementations are advised to count invocations /// per component and expose them to Prometheus, e.g. via the `metrics` crate. #[async_trait] -pub trait EthInterface { +pub trait EthInterface: Sync + Send { /// Returns the nonce of the provided account at the specified block. async fn nonce_at_for_account( &self, @@ -127,6 +127,13 @@ pub trait EthInterface { /// Returns the logs for the specified filter. async fn logs(&self, filter: Filter, component: &'static str) -> Result<Vec<Log>, Error>; + + /// Returns the block header for the specified block number or hash. + async fn block( + &self, + block_id: String, + component: &'static str, + ) -> Result<Option<Block<H256>>, Error>; } /// An extension of `EthInterface` trait, which is used to perform queries that are bound to @@ -141,10 +148,7 @@ pub trait EthInterface { /// 2. Consider adding the "unbound" version to the `EthInterface` trait and creating a default method /// implementation that invokes `contract` / `contract_addr` / `sender_account` methods.
#[async_trait] -pub trait BoundEthInterface: EthInterface -where - Self: Sync + Send, -{ +pub trait BoundEthInterface: EthInterface { /// ABI of the contract that is used by the implementor. fn contract(&self) -> &ethabi::Contract; diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index 66ede9580f5f..0a3bf67ca100 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -4,8 +4,8 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 1999e217a384..ce4540c151b7 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -3,7 +3,7 @@ use error::SignerError; use zksync_types::tx::primitives::PackedEthSignature; use zksync_types::{Address, EIP712TypedStructure, Eip712Domain}; -use crate::raw_ethereum_tx::TransactionParameters; +pub use crate::raw_ethereum_tx::TransactionParameters; pub use json_rpc_signer::JsonRpcSigner; pub use pk_signer::PrivateKeySigner; diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml index 4a7dd7e753d9..b723d36fbcc3 100644 --- a/core/lib/health_check/Cargo.toml +++ b/core/lib/health_check/Cargo.toml @@ -4,9 +4,10 @@ version = "0.1.0" edition = "2021" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] +async-trait = "0.1" diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs index 0fed16544152..862297aaf454 100644 --- a/core/lib/health_check/src/lib.rs +++ b/core/lib/health_check/src/lib.rs @@ -1,7 +1,11 @@ +/// Public re-export for other crates to be able to implement the interface. +pub use async_trait::async_trait; + /// Interface to be used for healthchecks /// There's a list of health checks that are looped in the /healthcheck endpoint to verify status +#[async_trait] pub trait CheckHealth: Send + Sync + 'static { - fn check_health(&self) -> CheckHealthStatus; + async fn check_health(&self) -> CheckHealthStatus; } /// Used to return health status when checked.
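The `CheckHealth` hunk above turns a synchronous trait into an async one via the re-exported `async_trait` attribute, so every implementor now annotates its impl block and marks `check_health` as `async`. A minimal sketch of an adapted implementor, assuming a `CheckHealthStatus::Ready` variant and a hypothetical `AlwaysReady` type (neither is shown in this patch):

```rust
// Hypothetical implementor of the now-async `CheckHealth` trait.
// `CheckHealthStatus::Ready` is an assumption; the enum body is not shown here.
use zksync_health_check::{async_trait, CheckHealth, CheckHealthStatus};

#[derive(Debug)]
struct AlwaysReady;

#[async_trait]
impl CheckHealth for AlwaysReady {
    async fn check_health(&self) -> CheckHealthStatus {
        // Implementors can now await I/O (e.g., a DB ping) before reporting.
        CheckHealthStatus::Ready
    }
}
```

The re-export of `async_trait` from the crate itself means implementors do not need to add their own `async-trait` dependency.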
diff --git a/core/lib/mempool/Cargo.toml b/core/lib/mempool/Cargo.toml index 45259f85f162..89a16c794978 100644 --- a/core/lib/mempool/Cargo.toml +++ b/core/lib/mempool/Cargo.toml @@ -4,8 +4,8 @@ version = "1.0.0" edition = "2018" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 572a897f157c..8f05a63c3de0 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -1,31 +1,34 @@ [package] name = "zksync_merkle_tree" version = "1.0.0" -edition = "2018" +edition = "2021" authors = ["The Matter Labs Team <hello@matterlabs.dev>"] homepage = "https://zksync.io/" -repository = "https://github.com/matter-labs/zksync-2" -license = "Apache-2.0" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zksync_types = { path = "../../lib/types", version = "1.0" } -zksync_crypto = { path = "../../lib/crypto", version = "1.0" } -zksync_storage = { path = "../../lib/storage", version = "1.0", default-features = false } -zksync_utils = { path = "../../lib/utils", version = "1.0" } -zksync_config = { path = "../../lib/config", version = "1.0" } +vlog = { path = "../vlog", version = "1.0" } +zksync_types = { path = "../types", version = "1.0" } +zksync_crypto = { path = "../crypto", version = "1.0" } +zksync_storage = { path = "../storage", version = "1.0", default-features = false } -vlog = { path = "../../lib/vlog", version = "1.0" } - -itertools = "0.10" -rayon = "1.3.0" -once_cell = "1.7" +leb128 = "0.2.5" +metrics = "0.20.1" +once_cell = "1.17.1" +rayon = "1.3.1" thiserror = "1.0" -bincode = "1" -serde = "1.0.90" -metrics = "0.20" -byteorder = "1.3" [dev-dependencies] +zksync_config = { path = "../config", version = "1.0" } + +assert_matches = "1.5.0" +clap = { version = "4.2.2", features = ["derive"] } +insta = { version = "1.29.0", features = ["yaml"] } +rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_with = { version = "1", features = ["hex"] } tempfile = "3.0.2" diff --git a/core/lib/merkle_tree/README.md b/core/lib/merkle_tree/README.md index 9ebe0d7db9ea..edf2ec20c417 100644 --- a/core/lib/merkle_tree/README.md +++ b/core/lib/merkle_tree/README.md @@ -1,9 +1,117 @@ # Merkle Tree -This cargo contains the basic functions to create and modify the Merkle Tree. +A binary Merkle tree implementation based on the amortized radix-16 Merkle tree (AR16MT) described in the [Jellyfish Merkle +tree] white paper. Unlike the Jellyfish Merkle tree, our construction uses a vanilla binary tree hashing algorithm to make it +easier to create circuits. The depth of the tree is 256, and Blake2 is used as the hashing function. -We're using a classic binary tree here (not Trie, not B-trees etc) to make it easier for the circuit creation. Also the -depth of the tree is fixed to 256. +## Snapshot tests -At any given moment, the storage keeps the tree only at a given block (and that block number is encoded in -`block_number` row) - it can be accessed via `ZkSyncTree` stuct. +In order to check backward compatibility of the tree implementation, it is snapshot-tested using the [`insta`] crate.
If +any of the snapshot tests fail, either fix your code or update the snapshots, keeping in mind that the changes you made +are probably not backward-compatible. + +## Benchmarking + +The `loadtest` example is a CLI app for measuring tree performance. It can use either the in-memory or RocksDB +storage backend, and either the Blake2 or a no-op hashing function. For example, the following command launches a benchmark with 75 +blocks, each containing 150,000 insertion operations. + +```shell +cargo run --release -p zksync_merkle_tree --example loadtest -- \ + --chunk-size=500 75 150000 +``` + +The order of timings should be as follows (measured on a MacBook Pro with a 12-core Apple M2 Max CPU and 32 GB of DDR5 RAM +using the command line above): + +```text +Processing block #74 +[metric] merkle_tree.load_nodes = 0.400870959 seconds +[metric] merkle_tree.extend_patch = 0.119743375 seconds +[metric] merkle_tree.extend_patch.new_leaves = 150000 +[metric] merkle_tree.extend_patch.new_internal_nodes = 57588 +[metric] merkle_tree.extend_patch.moved_leaves = 53976 +[metric] merkle_tree.extend_patch.updated_leaves = 0 +[metric] merkle_tree.extend_patch.avg_leaf_level = 26.74396987880927 +[metric] merkle_tree.extend_patch.max_leaf_level = 44 +[metric] merkle_tree.extend_patch.db_reads = 278133 +[metric] merkle_tree.extend_patch.patch_reads = 96024 +[metric] merkle_tree.finalize_patch = 0.707021 seconds +[metric] merkle_tree.leaf_count = 11250000 +[metric] merkle_tree.finalize_patch.hashed_bytes = 3205548448 bytes +Processed block #74 in 1.228553208s, root hash = 0x1ddec3794d0a1c5b44c2d9c7aa985cc61c70e988da2e6f2a810e0eb37f4322c0 +Committed block #74 in 571.588041ms +Verifying tree consistency... +Verified tree consistency in 37.478218666s +``` + +Full tree mode (with proofs), launched with the following command: + +```shell +cargo run --release -p zksync_merkle_tree --example loadtest -- \ + --chunk-size=500 --proofs --reads=50000 75 150000 +``` + +...has the following order of timings: + +```text +Processing block #74 +[metric] merkle_tree.load_nodes = 0.5310345 seconds +[metric] merkle_tree.extend_patch = 0.905285834 seconds +[metric] merkle_tree.extend_patch.new_leaves = 150000 +[metric] merkle_tree.extend_patch.new_internal_nodes = 57588 +[metric] merkle_tree.extend_patch.moved_leaves = 53976 +[metric] merkle_tree.extend_patch.updated_leaves = 0 +[metric] merkle_tree.extend_patch.avg_leaf_level = 26.74396987880927 +[metric] merkle_tree.extend_patch.max_leaf_level = 44 +[metric] merkle_tree.extend_patch.key_reads = 50000 +[metric] merkle_tree.extend_patch.db_reads = 400271 +[metric] merkle_tree.extend_patch.patch_reads = 96024 +[metric] merkle_tree.leaf_count = 11250000 +[metric] merkle_tree.finalize_patch = 0.302226041 seconds +[metric] merkle_tree.finalize_patch.hashed_bytes = 3439057088 bytes +Processed block #74 in 1.814916125s, root hash = 0x1ddec3794d0a1c5b44c2d9c7aa985cc61c70e988da2e6f2a810e0eb37f4322c0 +Committed block #74 in 904.560667ms +Verifying tree consistency... +Verified tree consistency in 37.935639292s +``` + +Launch the example with the `--help` flag for more details. + +### Benchmarking pruning + +The `--prune` option enables tree pruning with reasonable parameters, retaining just the latest tree version.
The +pruner should output `merkle_tree.pruning.*` metrics like this: + +```text +[histogram] merkle_tree.pruning.load_stale_keys = 0.009145916 seconds +[histogram] rocksdb.write.batch_size{db=merkle_tree} = 649934 bytes +[gauge] rocksdb.live_data_size{db=merkle_tree, cf=default} = 1802196863 bytes +[gauge] rocksdb.total_sst_size{db=merkle_tree, cf=default} = 2057174959 bytes +[gauge] rocksdb.total_mem_table_size{db=merkle_tree, cf=default} = 67110912 bytes +[gauge] rocksdb.live_data_size{db=merkle_tree, cf=stale_keys} = 3275975 bytes +[gauge] rocksdb.total_sst_size{db=merkle_tree, cf=stale_keys} = 5141413 bytes +[gauge] rocksdb.total_mem_table_size{db=merkle_tree, cf=stale_keys} = 19924992 bytes +[histogram] merkle_tree.pruning.apply_patch = 0.031769875 seconds +[gauge] merkle_tree.pruning.target_retained_version = 2999 +[histogram] merkle_tree.pruning.key_count = 48154 +[gauge] merkle_tree.pruning.deleted_stale_key_versions{bound=start} = 2997 +[gauge] merkle_tree.pruning.deleted_stale_key_versions{bound=end} = 3000 +``` + +(at the end of the test in a setup with 3,000 blocks x 5,000 write ops / block). The same setup without pruning has the +following order of RocksDB storage consumption at the end of the test: + +```text +[gauge] rocksdb.live_data_size{db=merkle_tree, cf=default} = 17723205116 bytes +[gauge] rocksdb.total_sst_size{db=merkle_tree, cf=default} = 17981011113 bytes +[gauge] rocksdb.total_mem_table_size{db=merkle_tree, cf=default} = 46139392 bytes +[gauge] rocksdb.live_data_size{db=merkle_tree, cf=stale_keys} = 441477770 bytes +[gauge] rocksdb.total_sst_size{db=merkle_tree, cf=stale_keys} = 441477770 bytes +[gauge] rocksdb.total_mem_table_size{db=merkle_tree, cf=stale_keys} = 19924992 bytes +``` + +I.e., pruning reduces RocksDB size ~8.7 times in this case. + +[jellyfish merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf +[`insta`]: https://docs.rs/insta/ diff --git a/core/lib/merkle_tree2/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs similarity index 80% rename from core/lib/merkle_tree2/examples/loadtest/main.rs rename to core/lib/merkle_tree/examples/loadtest/main.rs index 52ef2e5695f7..da0c84071038 100644 --- a/core/lib/merkle_tree2/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -7,11 +7,14 @@ use clap::Parser; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use tempfile::TempDir; -use std::time::Instant; +use std::{ + thread, + time::{Duration, Instant}, +}; use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_merkle_tree2::{ - Database, HashTree, MerkleTree, PatchSet, RocksDBWrapper, TreeInstruction, +use zksync_merkle_tree::{ + Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeInstruction, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -50,6 +53,9 @@ struct Cli { /// Seed to use in the RNG for reproducibility. #[arg(long = "rng-seed", default_value = "0")] rng_seed: u64, + /// Enables tree pruning. 
+ #[arg(long = "prune", conflicts_with = "in_memory")] + prune: bool, } impl Cli { @@ -59,6 +65,7 @@ impl Cli { let (mut mock_db, mut rocksdb); let mut _temp_dir = None; + let mut pruner_handles = None; let db: &mut dyn Database = if self.in_memory { mock_db = PatchSet::default(); &mut mock_db @@ -73,6 +80,12 @@ impl Cli { rocksdb.set_multi_get_chunk_size(chunk_size); } + if self.prune { + let (mut pruner, pruner_handle) = MerkleTreePruner::new(rocksdb.clone(), 0); + pruner.set_poll_interval(Duration::from_secs(10)); + let pruner_thread = thread::spawn(|| pruner.run()); + pruner_handles = Some((pruner_handle, pruner_thread)); + } _temp_dir = Some(dir); &mut rocksdb }; @@ -80,6 +93,7 @@ impl Cli { let hasher: &dyn HashTree = if self.no_hashing { &() } else { &Blake2Hasher }; let mut rng = StdRng::seed_from_u64(self.rng_seed); + let mut tree = MerkleTree::with_hasher(db, hasher); let mut next_key_idx = 0_u64; let mut next_value_idx = 0_u64; for version in 0..self.commit_count { @@ -98,36 +112,34 @@ impl Cli { println!("Processing block #{version}"); let start = Instant::now(); - let tree = MerkleTree::with_hasher(&*db, hasher); - let (root_hash, patch) = if self.proofs { + let root_hash = if self.proofs { let reads = Self::generate_keys(read_indices.into_iter()) .map(|key| (key, TreeInstruction::Read)); let instructions = kvs .map(|(key, hash)| (key, TreeInstruction::Write(hash))) .chain(reads) .collect(); - let (output, patch) = tree.extend_with_proofs(instructions); - (output.root_hash().unwrap(), patch) + let output = tree.extend_with_proofs(instructions); + output.root_hash().unwrap() } else { - let (output, patch) = tree.extend(kvs.collect()); - (output.root_hash, patch) + let output = tree.extend(kvs.collect()); + output.root_hash }; let elapsed = start.elapsed(); println!("Processed block #{version} in {elapsed:?}, root hash = {root_hash:?}"); - - let start = Instant::now(); - db.apply_patch(patch); - let elapsed = start.elapsed(); - println!("Committed block #{version} in {elapsed:?}"); } println!("Verifying tree consistency..."); let start = Instant::now(); - MerkleTree::with_hasher(&*db, hasher) - .verify_consistency(self.commit_count - 1) + tree.verify_consistency(self.commit_count - 1) .expect("tree consistency check failed"); let elapsed = start.elapsed(); println!("Verified tree consistency in {elapsed:?}"); + + if let Some((pruner_handle, pruner_thread)) = pruner_handles { + pruner_handle.abort(); + pruner_thread.join().unwrap(); + } } fn generate_keys(key_indexes: impl Iterator) -> impl Iterator { diff --git a/core/lib/merkle_tree2/examples/loadtest/recorder.rs b/core/lib/merkle_tree/examples/loadtest/recorder.rs similarity index 65% rename from core/lib/merkle_tree2/examples/loadtest/recorder.rs rename to core/lib/merkle_tree/examples/loadtest/recorder.rs index 1b0dddff164e..1c86fac6e1fc 100644 --- a/core/lib/merkle_tree2/examples/loadtest/recorder.rs +++ b/core/lib/merkle_tree/examples/loadtest/recorder.rs @@ -1,11 +1,13 @@ //! Simple `metrics::Recorder` implementation that prints information to stdout. 
use metrics::{ - Counter, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Recorder, SharedString, Unit, + Counter, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Label, Recorder, SharedString, + Unit, }; use std::{ collections::HashMap, + fmt::{self, Write as _}, sync::{ atomic::{AtomicU64, Ordering}, Arc, Mutex, @@ -14,17 +16,36 @@ use std::{ type SharedMetadata = Mutex>; +#[derive(Debug, Clone, Copy)] +enum MetricKind { + Gauge, + Histogram, +} + +impl fmt::Display for MetricKind { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(match self { + Self::Gauge => "gauge", + Self::Histogram => "histogram", + }) + } +} + #[derive(Debug)] struct PrintingMetric { + kind: MetricKind, key: KeyName, + labels: Vec