diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 8ed0bee8c51a..506ac10e38e2 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -280,7 +280,7 @@ jobs: # TODO(PLA-653): Restore bridge tests for EN. - name: Integration tests - run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|custom-erc20-bridge' + run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|custom-erc20-bridge|snapshots-creator' - name: Run Cross EN Checker run: ci_run zk run cross-en-checker diff --git a/Cargo.lock b/Cargo.lock index ec650188c8a9..dc19581dd773 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6538,6 +6538,28 @@ dependencies = [ "serde", ] +[[package]] +name = "snapshots_creator" +version = "0.1.0" +dependencies = [ + "anyhow", + "futures 0.3.28", + "prometheus_exporter", + "serde", + "serde_json", + "tokio", + "tracing", + "vise", + "vlog", + "zksync_config", + "zksync_core", + "zksync_dal", + "zksync_env_config", + "zksync_object_store", + "zksync_types", + "zksync_utils", +] + [[package]] name = "snark_wrapper" version = "0.1.0" @@ -8922,9 +8944,11 @@ dependencies = [ "anyhow", "async-trait", "bincode", + "flate2", "google-cloud-auth", "google-cloud-storage", "http", + "serde_json", "tempdir", "tokio", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 75a4c7237d2b..ac01673bfb99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "core/bin/external_node", "core/bin/merkle_tree_consistency_checker", "core/bin/rocksdb_util", + "core/bin/snapshots_creator", "core/bin/storage_logs_dedup_migration", "core/bin/system-constants-generator", "core/bin/verification_key_generator_and_server", diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index aea48bc0aeb7..b84c6ce59bb6 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -338,7 +338,7 @@ impl OptionalENConfig { pub fn api_namespaces(&self) -> Vec { self.api_namespaces .clone() - .unwrap_or_else(|| Namespace::NON_DEBUG.to_vec()) + .unwrap_or_else(|| Namespace::DEFAULT.to_vec()) } pub fn max_response_body_size(&self) -> usize { diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml new file mode 100644 index 000000000000..c11cbbb49495 --- /dev/null +++ b/core/bin/snapshots_creator/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "snapshots_creator" +version = "0.1.0" +edition = "2021" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. 
+
+[dependencies]
+vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" }
+prometheus_exporter = { path = "../../lib/prometheus_exporter" }
+zksync_config = { path = "../../lib/config" }
+zksync_dal = { path = "../../lib/dal" }
+zksync_env_config = { path = "../../lib/env_config" }
+zksync_utils = { path = "../../lib/utils" }
+zksync_types = { path = "../../lib/types" }
+zksync_core = { path = "../../lib/zksync_core" }
+zksync_object_store = { path = "../../lib/object_store" }
+vlog = { path = "../../lib/vlog" }
+
+anyhow = "1.0"
+tokio = { version = "1", features = ["full"] }
+tracing = "0.1"
+futures = "0.3"
+serde = { version = "1.0.189", features = ["derive"] }
+serde_json = "1.0"
diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
new file mode 100644
index 000000000000..03167b803592
--- /dev/null
+++ b/core/bin/snapshots_creator/README.md
@@ -0,0 +1,24 @@
+# Snapshots Creator
+
+The snapshots creator is a small command-line tool that creates a snapshot of a zkSync node, which an external node
+(EN) can use to initialize itself to a certain L1 batch.
+
+Snapshots do not contain the full transaction history, but rather a minimal subset of information needed to bootstrap
+an EN.
+
+Usage (local development):\
+First run `zk env dev`, \
+then the creator can be run using:
+`zk run snapshots-creator`
+
+Snapshot contents can be stored, depending on the `blob_store` config, either in the local filesystem or in GCS.
+
+## Snapshots format
+
+Each snapshot consists of three types of objects (see
+[snapshots.rs](https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/snapshots.rs)): a header, storage
+logs chunks, and factory deps:
+
+- Snapshot header (currently returned by the `snapshots` namespace of the JSON-RPC API)
+- Snapshot storage logs chunks (currently gzipped JSON; most likely to be stored as gzipped protobuf files eventually, but this part is still WIP)
+- Factory dependencies (currently gzipped JSON; most likely to be stored as protobufs in the very near future)
diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
new file mode 100644
index 000000000000..248f2de53597
--- /dev/null
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -0,0 +1,22 @@
+use std::ops;
+
+use zksync_types::{H256, U256};
+use zksync_utils::u256_to_h256;
+
+pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ops::RangeInclusive<H256> {
+    assert!(chunks_count > 0);
+    let mut stride = U256::MAX / chunks_count;
+    let stride_minus_one = if stride < U256::MAX {
+        stride += U256::one();
+        stride - 1
+    } else {
+        stride // `stride` is really 1 << 256 == U256::MAX + 1
+    };
+
+    let start = stride * chunk_id;
+    let (mut end, is_overflow) = stride_minus_one.overflowing_add(start);
+    if is_overflow {
+        end = U256::MAX;
+    }
+    u256_to_h256(start)..=u256_to_h256(end)
+}
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
new file mode 100644
index 000000000000..300b12572f99
--- /dev/null
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -0,0 +1,291 @@
+mod chunking;
+
+use std::{cmp::max, time::Duration};
+
+use anyhow::Context as _;
+use prometheus_exporter::PrometheusExporterConfig;
+use tokio::sync::{watch, Semaphore};
+use vise::{Buckets, Gauge, Histogram, Metrics, Unit};
+use zksync_config::{configs::PrometheusConfig, PostgresConfig, SnapshotsCreatorConfig};
+use zksync_dal::ConnectionPool;
+use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv};
+use zksync_object_store::{ObjectStore,
ObjectStoreFactory};
+use zksync_types::{
+    snapshots::{
+        SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
+    },
+    L1BatchNumber, MiniblockNumber,
+};
+use zksync_utils::ceil_div;
+
+use crate::chunking::get_chunk_hashed_keys_range;
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "snapshots_creator")]
+struct SnapshotsCreatorMetrics {
+    storage_logs_chunks_count: Gauge<u64>,
+
+    storage_logs_chunks_left_to_process: Gauge<u64>,
+
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    snapshot_generation_duration: Histogram<Duration>,
+
+    snapshot_l1_batch: Gauge<u64>,
+
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    storage_logs_processing_duration: Histogram<Duration>,
+
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    factory_deps_processing_duration: Histogram<Duration>,
+}
+#[vise::register]
+pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
+
+async fn maybe_enable_prometheus_metrics(
+    stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<()> {
+    let prometheus_config = PrometheusConfig::from_env().ok();
+    if let Some(prometheus_config) = prometheus_config {
+        let exporter_config = PrometheusExporterConfig::push(
+            prometheus_config.gateway_endpoint(),
+            prometheus_config.push_interval(),
+        );
+
+        tracing::info!("Starting prometheus exporter with config {prometheus_config:?}");
+        tokio::spawn(exporter_config.run(stop_receiver));
+    } else {
+        tracing::info!("Starting without prometheus exporter");
+    }
+    Ok(())
+}
+
+async fn process_storage_logs_single_chunk(
+    blob_store: &dyn ObjectStore,
+    pool: &ConnectionPool,
+    semaphore: &Semaphore,
+    miniblock_number: MiniblockNumber,
+    l1_batch_number: L1BatchNumber,
+    chunk_id: u64,
+    chunks_count: u64,
+) -> anyhow::Result<String> {
+    let _permit = semaphore.acquire().await?;
+    let hashed_keys_range = get_chunk_hashed_keys_range(chunk_id, chunks_count);
+    let latency = METRICS.storage_logs_processing_duration.start();
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
+    let logs = conn
+        .snapshots_creator_dal()
+        .get_storage_logs_chunk(miniblock_number, hashed_keys_range)
+        .await
+        .context("Error fetching storage logs chunk")?;
+    drop(conn);
+    let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
+    let key = SnapshotStorageLogsStorageKey {
+        l1_batch_number,
+        chunk_id,
+    };
+    let filename = blob_store
+        .put(key, &storage_logs_chunk)
+        .await
+        .context("Error storing storage logs chunk in blob store")?;
+
+    let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotStorageLogsChunk>();
+    let output_filepath = format!("{output_filepath_prefix}/{filename}");
+
+    let elapsed = latency.observe();
+    let tasks_left = METRICS.storage_logs_chunks_left_to_process.dec_by(1) - 1;
+    tracing::info!(
+        "Finished chunk number {chunk_id}, overall progress {}/{}, step took {elapsed:?}, output stored in {output_filepath}",
+        chunks_count - tasks_left,
+        chunks_count
+    );
+
+    Ok(output_filepath)
+}
+
+async fn process_factory_deps(
+    blob_store: &dyn ObjectStore,
+    pool: &ConnectionPool,
+    miniblock_number: MiniblockNumber,
+    l1_batch_number: L1BatchNumber,
+) -> anyhow::Result<String> {
+    let latency = METRICS.factory_deps_processing_duration.start();
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
+    let factory_deps = conn
+        .snapshots_creator_dal()
+        .get_all_factory_deps(miniblock_number)
+        .await?;
+    let factory_deps = SnapshotFactoryDependencies { factory_deps };
+    drop(conn);
+    let filename = blob_store
+        .put(l1_batch_number, &factory_deps)
+        .await
.context("Error storing factory deps in blob store")?; + let output_filepath_prefix = blob_store.get_storage_prefix::(); + let output_filepath = format!("{output_filepath_prefix}/{filename}"); + let elapsed = latency.observe(); + tracing::info!( + "Finished factory dependencies, step took {elapsed:?} , output stored in {}", + output_filepath + ); + Ok(output_filepath) +} + +async fn run( + blob_store: Box, + replica_pool: ConnectionPool, + master_pool: ConnectionPool, +) -> anyhow::Result<()> { + let latency = METRICS.snapshot_generation_duration.start(); + + let config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?; + + let mut conn = replica_pool + .access_storage_tagged("snapshots_creator") + .await?; + + // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch + let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; + + let mut master_conn = master_pool + .access_storage_tagged("snapshots_creator") + .await?; + if master_conn + .snapshots_dal() + .get_snapshot_metadata(l1_batch_number) + .await? + .is_some() + { + tracing::info!("Snapshot for L1 batch number {l1_batch_number} already exists, exiting",); + return Ok(()); + } + drop(master_conn); + + let last_miniblock_number_in_batch = conn + .blocks_dal() + .get_miniblock_range_of_l1_batch(l1_batch_number) + .await? + .context("Error fetching last miniblock number")? + .1; + let distinct_storage_logs_keys_count = conn + .snapshots_creator_dal() + .get_distinct_storage_logs_keys_count(l1_batch_number) + .await?; + + drop(conn); + + let chunk_size = config.storage_logs_chunk_size; + // we force at least 10 chunks to avoid situations where only one chunk is created in tests + let chunks_count = max(10, ceil_div(distinct_storage_logs_keys_count, chunk_size)); + + METRICS.storage_logs_chunks_count.set(chunks_count); + + tracing::info!( + "Creating snapshot for storage logs up to miniblock {last_miniblock_number_in_batch}, l1_batch {}", + l1_batch_number.0 + ); + tracing::info!("Starting to generate {chunks_count} chunks of expected size {chunk_size}"); + + let factory_deps_output_file = process_factory_deps( + &*blob_store, + &replica_pool, + last_miniblock_number_in_batch, + l1_batch_number, + ) + .await?; + + METRICS + .storage_logs_chunks_left_to_process + .set(chunks_count); + + let semaphore = Semaphore::new(config.concurrent_queries_count as usize); + let tasks = (0..chunks_count).map(|chunk_id| { + process_storage_logs_single_chunk( + &*blob_store, + &replica_pool, + &semaphore, + last_miniblock_number_in_batch, + l1_batch_number, + chunk_id, + chunks_count, + ) + }); + let mut storage_logs_output_files = futures::future::try_join_all(tasks).await?; + tracing::info!("Finished generating snapshot, storing progress in db"); + + let mut master_conn = master_pool + .access_storage_tagged("snapshots_creator") + .await?; + + storage_logs_output_files.sort(); + //sanity check + assert_eq!(storage_logs_output_files.len(), chunks_count as usize); + master_conn + .snapshots_dal() + .add_snapshot( + l1_batch_number, + &storage_logs_output_files, + &factory_deps_output_file, + ) + .await?; + + METRICS.snapshot_l1_batch.set(l1_batch_number.0 as u64); + + let elapsed = latency.observe(); + tracing::info!("snapshot_generation_duration: {elapsed:?}"); + tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get()); + tracing::info!( + "storage_logs_chunks_count: {}", + METRICS.storage_logs_chunks_count.get() + ); + + Ok(()) +} + +#[tokio::main] +async 
fn main() -> anyhow::Result<()> { + let (stop_sender, stop_receiver) = watch::channel(false); + + tracing::info!("Starting snapshots creator"); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let log_format = vlog::log_format_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let sentry_url = vlog::sentry_url_from_env(); + #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. + let environment = vlog::environment_from_env(); + + maybe_enable_prometheus_metrics(stop_receiver).await?; + let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format); + if let Some(sentry_url) = sentry_url { + builder = builder + .with_sentry_url(&sentry_url) + .context("Invalid Sentry URL")? + .with_sentry_environment(environment); + } + let _guard = builder.build(); + + let object_store_config = + SnapshotsObjectStoreConfig::from_env().context("SnapshotsObjectStoreConfig::from_env()")?; + let blob_store = ObjectStoreFactory::new(object_store_config.0) + .create_store() + .await; + + let postgres_config = PostgresConfig::from_env().context("PostgresConfig")?; + let creator_config = + SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?; + + let replica_pool = ConnectionPool::builder( + postgres_config.replica_url()?, + creator_config.concurrent_queries_count, + ) + .build() + .await?; + + let master_pool = ConnectionPool::singleton(postgres_config.master_url()?) + .build() + .await?; + + run(blob_store, replica_pool, master_pool).await?; + tracing::info!("Finished running snapshot creator!"); + stop_sender.send(true).ok(); + Ok(()) +} diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 710c128c951f..9acfe9025620 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -20,6 +20,7 @@ pub use self::{ proof_data_handler::ProofDataHandlerConfig, prover::{ProverConfig, ProverConfigs}, prover_group::ProverGroupConfig, + snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, witness_generator::WitnessGeneratorConfig, }; @@ -46,6 +47,7 @@ pub mod object_store; pub mod proof_data_handler; pub mod prover; pub mod prover_group; +pub mod snapshots_creator; pub mod utils; pub mod witness_generator; diff --git a/core/lib/config/src/configs/snapshots_creator.rs b/core/lib/config/src/configs/snapshots_creator.rs new file mode 100644 index 000000000000..2f37c5d3afde --- /dev/null +++ b/core/lib/config/src/configs/snapshots_creator.rs @@ -0,0 +1,18 @@ +use serde::Deserialize; + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct SnapshotsCreatorConfig { + #[serde(default = "snapshots_creator_storage_logs_chunk_size_default")] + pub storage_logs_chunk_size: u64, + + #[serde(default = "snapshots_creator_concurrent_queries_count")] + pub concurrent_queries_count: u32, +} + +fn snapshots_creator_storage_logs_chunk_size_default() -> u64 { + 1_000_000 +} + +fn snapshots_creator_concurrent_queries_count() -> u32 { + 25 +} diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index aa83577aad87..4937afe387a5 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -3,7 +3,7 @@ pub use crate::configs::{ ApiConfig, ChainConfig, ContractVerifierConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, ETHWatchConfig, FetcherConfig, GasAdjusterConfig, ObjectStoreConfig, - PostgresConfig, ProverConfig, ProverConfigs, + PostgresConfig, ProverConfig, 
ProverConfigs, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql new file mode 100644 index 000000000000..708ff00f00e5 --- /dev/null +++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS snapshots; diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql new file mode 100644 index 000000000000..ae35521ee5ee --- /dev/null +++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql @@ -0,0 +1,9 @@ +CREATE TABLE snapshots +( + l1_batch_number BIGINT NOT NULL PRIMARY KEY, + storage_logs_filepaths TEXT[] NOT NULL, + factory_deps_filepath TEXT NOT NULL, + + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index c2e68b55c8f5..a161b7f132b3 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -340,6 +340,20 @@ }, "query": "INSERT INTO eth_txs_history (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at) VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3) RETURNING id" }, + "07bb6aa5f4ffe0b753cca8ac92c65bd7618db908250e5bf9e835f54b1dd04755": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "TextArray", + "Text" + ] + } + }, + "query": "INSERT INTO snapshots (l1_batch_number, storage_logs_filepaths, factory_deps_filepath, created_at, updated_at) VALUES ($1, $2, $3, NOW(), NOW())" + }, "09768b376996b96add16a02d1a59231cb9b525cd5bd19d22a76149962d4c91c2": { "describe": { "columns": [], @@ -644,6 +658,38 @@ }, "query": "SELECT l1_address FROM tokens WHERE market_volume > $1" }, + "1658e6fce121904c1353e51663fc307b01e02bc412ee46ac17e0f5acacd0b5c4": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "factory_deps_filepath", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "storage_logs_filepaths", + "ordinal": 2, + "type_info": "TextArray" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1" + }, "16bca6f4258ff3db90a26a8550c5fc35e666fb698960486528fceba3e452fd62": { "describe": { "columns": [ @@ -4871,6 +4917,26 @@ }, "query": "INSERT INTO events_queue (l1_batch_number, serialized_events_queue) VALUES ($1, $2)" }, + "61cc5a1564918a34b4235290c421f04c40ef935f72f2c72744a5b741439a966a": { + "describe": { + "columns": [ + { + "name": "bytecode", + "ordinal": 0, + "type_info": "Bytea" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT bytecode FROM factory_deps WHERE miniblock_number <= $1" + }, "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": { "describe": { "columns": [ @@ -8172,6 +8238,26 @@ }, "query": "INSERT INTO basic_witness_input_producer_jobs (l1_batch_number, status, created_at, updated_at) VALUES ($1, $2, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" }, + "a190719309378ee1912ffedd8180c151aacf17c3ca3bfca8563fa404d587edc8": { + "describe": { + "columns": [ + { + "name": "index", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": 
[ + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "\n SELECT index\n FROM initial_writes\n WHERE l1_batch_number <= $1\n ORDER BY l1_batch_number DESC , index DESC \n LIMIT 1;\n " + }, "a19b7137403c5cdf1be5f5122ce4d297ed661fa8bdb3bc91f8a81fe9da47469e": { "describe": { "columns": [ @@ -9162,6 +9248,36 @@ }, "query": "INSERT INTO prover_protocol_versions\n (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash,\n recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, verifier_address, created_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, now())\n " }, + "b707b6247c76a50bda3be8076aafb77de60cfc5a0cc61c7dd60e4330eabc28d7": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "factory_deps_filepath", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "storage_logs_filepaths", + "ordinal": 2, + "type_info": "TextArray" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots" + }, "b944df7af612ec911170a43be846eb2f6e27163b0d3983672de2b8d5d60af640": { "describe": { "columns": [ @@ -10032,6 +10148,58 @@ }, "query": "SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 ORDER BY l1_batch_number DESC LIMIT $1" }, + "dd650c06788a1c47b201e768382320fded2b8950ab836b2e5660f15b71dd11a0": { + "describe": { + "columns": [ + { + "name": "key!", + "ordinal": 0, + "type_info": "Bytea" + }, + { + "name": "value!", + "ordinal": 1, + "type_info": "Bytea" + }, + { + "name": "address!", + "ordinal": 2, + "type_info": "Bytea" + }, + { + "name": "miniblock_number!", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 4, + "type_info": "Int8" + }, + { + "name": "index", + "ordinal": 5, + "type_info": "Int8" + } + ], + "nullable": [ + true, + true, + true, + true, + true, + true + ], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea" + ] + } + }, + "query": "\n SELECT storage_logs.key as \"key!\",\n storage_logs.value as \"value!\",\n storage_logs.address as \"address!\",\n storage_logs.miniblock_number as \"miniblock_number!\",\n initial_writes.l1_batch_number as \"l1_batch_number!\",\n initial_writes.index\n FROM (SELECT hashed_key,\n max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n FROM storage_logs\n WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3\n GROUP BY hashed_key\n ORDER BY hashed_key) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;\n " + }, "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": { "describe": { "columns": [], diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index d934c2c06e63..f5c55524d6f3 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -16,6 +16,7 @@ use crate::{ fri_witness_generator_dal::FriWitnessGeneratorDal, gpu_prover_queue_dal::GpuProverQueueDal, proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, prover_dal::ProverDal, + snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_dal::StorageDal, storage_logs_dal::StorageLogsDal, 
storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tokens_dal::TokensDal, @@ -49,6 +50,8 @@ pub mod proof_generation_dal; pub mod protocol_versions_dal; pub mod protocol_versions_web3_dal; pub mod prover_dal; +pub mod snapshots_creator_dal; +pub mod snapshots_dal; pub mod storage_dal; pub mod storage_logs_dal; pub mod storage_logs_dedup_dal; @@ -235,4 +238,12 @@ impl<'a> StorageProcessor<'a> { pub fn system_dal(&mut self) -> SystemDal<'_, 'a> { SystemDal { storage: self } } + + pub fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a> { + SnapshotsDal { storage: self } + } + + pub fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a> { + SnapshotsCreatorDal { storage: self } + } } diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs new file mode 100644 index 000000000000..b87003602598 --- /dev/null +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -0,0 +1,104 @@ +use zksync_types::{ + snapshots::{SnapshotFactoryDependency, SnapshotStorageLog}, + AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256, +}; + +use crate::{instrument::InstrumentExt, StorageProcessor}; + +#[derive(Debug)] +pub struct SnapshotsCreatorDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +impl SnapshotsCreatorDal<'_, '_> { + pub async fn get_distinct_storage_logs_keys_count( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> sqlx::Result { + let count = sqlx::query!( + r#" + SELECT index + FROM initial_writes + WHERE l1_batch_number <= $1 + ORDER BY l1_batch_number DESC , index DESC + LIMIT 1; + "#, + l1_batch_number.0 as i32 + ) + .instrument("get_storage_logs_count") + .report_latency() + .fetch_one(self.storage.conn()) + .await? + .index; + Ok(count as u64) + } + + pub async fn get_storage_logs_chunk( + &mut self, + miniblock_number: MiniblockNumber, + hashed_keys_range: std::ops::RangeInclusive, + ) -> sqlx::Result> { + let storage_logs = sqlx::query!( + r#" + SELECT storage_logs.key as "key!", + storage_logs.value as "value!", + storage_logs.address as "address!", + storage_logs.miniblock_number as "miniblock_number!", + initial_writes.l1_batch_number as "l1_batch_number!", + initial_writes.index + FROM (SELECT hashed_key, + max(ARRAY [miniblock_number, operation_number]::int[]) AS op + FROM storage_logs + WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3 + GROUP BY hashed_key + ORDER BY hashed_key) AS keys + INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key + AND storage_logs.miniblock_number = keys.op[1] + AND storage_logs.operation_number = keys.op[2] + INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key; + "#, + miniblock_number.0 as i64, + hashed_keys_range.start().0.as_slice(), + hashed_keys_range.end().0.as_slice(), + ) + .instrument("get_storage_logs_chunk") + .with_arg("miniblock_number", &miniblock_number) + .with_arg("min_hashed_key", &hashed_keys_range.start()) + .with_arg("max_hashed_key", &hashed_keys_range.end()) + .report_latency() + .fetch_all(self.storage.conn()) + .await? 
+ .iter() + .map(|row| SnapshotStorageLog { + key: StorageKey::new( + AccountTreeId::new(Address::from_slice(&row.address)), + H256::from_slice(&row.key), + ), + value: H256::from_slice(&row.value), + l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32), + enumeration_index: row.index.unwrap() as u64, + }) + .collect(); + Ok(storage_logs) + } + + pub async fn get_all_factory_deps( + &mut self, + miniblock_number: MiniblockNumber, + ) -> sqlx::Result> { + let rows = sqlx::query!( + "SELECT bytecode FROM factory_deps WHERE miniblock_number <= $1", + miniblock_number.0 as i64, + ) + .instrument("get_all_factory_deps") + .report_latency() + .fetch_all(self.storage.conn()) + .await?; + Ok(rows + .into_iter() + .map(|row| SnapshotFactoryDependency { + bytecode: row.bytecode, + }) + .collect()) + } +} diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs new file mode 100644 index 000000000000..0e031b31d142 --- /dev/null +++ b/core/lib/dal/src/snapshots_dal.rs @@ -0,0 +1,134 @@ +use zksync_types::{ + snapshots::{AllSnapshots, SnapshotMetadata}, + L1BatchNumber, +}; + +use crate::{instrument::InstrumentExt, StorageProcessor}; + +#[derive(Debug)] +pub struct SnapshotsDal<'a, 'c> { + pub(crate) storage: &'a mut StorageProcessor<'c>, +} + +impl SnapshotsDal<'_, '_> { + pub async fn add_snapshot( + &mut self, + l1_batch_number: L1BatchNumber, + storage_logs_filepaths: &[String], + factory_deps_filepaths: &str, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + "INSERT INTO snapshots (l1_batch_number, storage_logs_filepaths, factory_deps_filepath, created_at, updated_at) \ + VALUES ($1, $2, $3, NOW(), NOW())", + l1_batch_number.0 as i32, + storage_logs_filepaths, + factory_deps_filepaths, + ) + .instrument("add_snapshot") + .report_latency() + .execute(self.storage.conn()) + .await?; + Ok(()) + } + + pub async fn get_all_snapshots(&mut self) -> Result { + let records: Vec = sqlx::query!( + "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots" + ) + .instrument("get_all_snapshots") + .report_latency() + .fetch_all(self.storage.conn()) + .await? + .into_iter() + .map(|r| L1BatchNumber(r.l1_batch_number as u32)) + .collect(); + Ok(AllSnapshots { + snapshots_l1_batch_numbers: records, + }) + } + + pub async fn get_snapshot_metadata( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> Result, sqlx::Error> { + let record: Option = sqlx::query!( + "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1", + l1_batch_number.0 as i32 + ) + .instrument("get_snapshot_metadata") + .report_latency() + .fetch_optional(self.storage.conn()) + .await? 
+ .map(|r| SnapshotMetadata { + l1_batch_number: L1BatchNumber(r.l1_batch_number as u32), + factory_deps_filepath: r.factory_deps_filepath, + storage_logs_filepaths: r.storage_logs_filepaths, + }); + Ok(record) + } +} + +#[cfg(test)] +mod tests { + use zksync_types::L1BatchNumber; + + use crate::ConnectionPool; + + #[tokio::test] + async fn adding_snapshot() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let mut dal = conn.snapshots_dal(); + let l1_batch_number = L1BatchNumber(100); + dal.add_snapshot(l1_batch_number, &[], "gs:///bucket/factory_deps.bin") + .await + .expect("Failed to add snapshot"); + + let snapshots = dal + .get_all_snapshots() + .await + .expect("Failed to retrieve snapshots"); + assert_eq!(1, snapshots.snapshots_l1_batch_numbers.len()); + assert_eq!( + snapshots.snapshots_l1_batch_numbers[0], + l1_batch_number as L1BatchNumber + ); + + let snapshot_metadata = dal + .get_snapshot_metadata(l1_batch_number) + .await + .expect("Failed to retrieve snapshot") + .unwrap(); + assert_eq!( + snapshot_metadata.l1_batch_number, + l1_batch_number as L1BatchNumber + ); + } + + #[tokio::test] + async fn adding_files() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + let mut dal = conn.snapshots_dal(); + let l1_batch_number = L1BatchNumber(100); + dal.add_snapshot( + l1_batch_number, + &[ + "gs:///bucket/test_file1.bin".to_string(), + "gs:///bucket/test_file2.bin".to_string(), + ], + "gs:///bucket/factory_deps.bin", + ) + .await + .expect("Failed to add snapshot"); + + let files = dal + .get_snapshot_metadata(l1_batch_number) + .await + .expect("Failed to retrieve snapshot") + .unwrap() + .storage_logs_filepaths; + assert!(files.contains(&"gs:///bucket/test_file1.bin".to_string())); + assert!(files.contains(&"gs:///bucket/test_file2.bin".to_string())); + } +} diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index a4a4af3f1ec0..126fbb19cdfe 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -23,6 +23,7 @@ pub mod object_store; mod proof_data_handler; mod prover; mod prover_group; +mod snapshots_creator; mod utils; mod witness_generator; diff --git a/core/lib/env_config/src/object_store.rs b/core/lib/env_config/src/object_store.rs index 3b4afe86b522..23b1abaf5166 100644 --- a/core/lib/env_config/src/object_store.rs +++ b/core/lib/env_config/src/object_store.rs @@ -30,6 +30,16 @@ impl FromEnv for ProverObjectStoreConfig { } } +#[derive(Debug)] +pub struct SnapshotsObjectStoreConfig(pub ObjectStoreConfig); + +impl FromEnv for SnapshotsObjectStoreConfig { + fn from_env() -> anyhow::Result { + let config = envy_load("snapshots_object_store", "SNAPSHOTS_OBJECT_STORE_")?; + Ok(Self(config)) + } +} + #[cfg(test)] mod tests { use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; @@ -93,4 +103,19 @@ mod tests { let actual = ProverObjectStoreConfig::from_env().unwrap().0; assert_eq!(actual, expected_config("/prover_base_url")); } + + #[test] + fn snapshots_bucket_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL="/snapshots_base_url" + SNAPSHOTS_OBJECT_STORE_MODE="FileBacked" + SNAPSHOTS_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts" + SNAPSHOTS_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json" + SNAPSHOTS_OBJECT_STORE_MAX_RETRIES="5" + "#; + lock.set_env(config); + let actual = 
SnapshotsObjectStoreConfig::from_env().unwrap().0; + assert_eq!(actual, expected_config("/snapshots_base_url")); + } } diff --git a/core/lib/env_config/src/snapshots_creator.rs b/core/lib/env_config/src/snapshots_creator.rs new file mode 100644 index 000000000000..6ed80e3780ce --- /dev/null +++ b/core/lib/env_config/src/snapshots_creator.rs @@ -0,0 +1,9 @@ +use zksync_config::SnapshotsCreatorConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for SnapshotsCreatorConfig { + fn from_env() -> anyhow::Result { + envy_load("snapshots_creator", "SNAPSHOTS_CREATOR_") + } +} diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index 20f52a995a8c..bd071d0d0024 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -20,6 +20,8 @@ bincode = "1" google-cloud-storage = "0.15.0" google-cloud-auth = "0.13.0" http = "0.2.9" +serde_json = "1.0" +flate2 = "1.0.28" tokio = { version = "1.21.2", features = ["full"] } tracing = "0.1" diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 6f589e83630d..2d77366a952c 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -32,6 +32,7 @@ impl FileBackedObjectStore { Bucket::NodeAggregationWitnessJobsFri, Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, + Bucket::StorageSnapshot, ] { let bucket_path = format!("{base_dir}/{bucket}"); fs::create_dir_all(&bucket_path) @@ -69,6 +70,10 @@ impl ObjectStore for FileBackedObjectStore { let filename = self.filename(bucket, key); fs::remove_file(filename).await.map_err(From::from) } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + format!("{}/{}", self.base_dir, bucket) + } } #[cfg(test)] diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index 53df970f7d0c..93ee39fdef2c 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -207,6 +207,14 @@ impl ObjectStore for GoogleCloudStorage { async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { self.remove_inner(bucket.as_str(), key).await } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + format!( + "https://storage.googleapis.com/{}/{}", + self.bucket_prefix.clone(), + bucket.as_str() + ) + } } #[cfg(test)] diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs index ac1a2fd7a444..f7ee7119c7a3 100644 --- a/core/lib/object_store/src/mock.rs +++ b/core/lib/object_store/src/mock.rs @@ -45,4 +45,8 @@ impl ObjectStore for MockStore { bucket_map.remove(key); Ok(()) } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + bucket.to_string() + } } diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index 35808bb4686d..89241c7edf7c 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -1,8 +1,14 @@ //! Stored objects. +use std::io::Read; + +use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use zksync_types::{ aggregated_operations::L1BatchProofForL1, proofs::{AggregationRound, PrepareBasicCircuitsJob}, + snapshots::{ + SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, + }, storage::witness_block_state::WitnessBlockState, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, @@ -65,6 +71,59 @@ macro_rules! 
serialize_using_bincode {
     };
 }
 
+impl StoredObject for SnapshotFactoryDependencies {
+    const BUCKET: Bucket = Bucket::StorageSnapshot;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("snapshot_l1_batch_{key}_factory_deps.json.gzip")
+    }
+
+    // TODO: use a better language-agnostic serialization format like protobuf.
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        serde_json::to_writer(&mut encoder, self).map_err(BoxedError::from)?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(BoxedError::from)?;
+        serde_json::from_slice(&decompressed_bytes).map_err(From::from)
+    }
+}
+
+impl StoredObject for SnapshotStorageLogsChunk {
+    const BUCKET: Bucket = Bucket::StorageSnapshot;
+    type Key<'a> = SnapshotStorageLogsStorageKey;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!(
+            "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.json.gzip",
+            key.l1_batch_number, key.chunk_id
+        )
+    }
+
+    // TODO: use a better language-agnostic serialization format like protobuf.
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        serde_json::to_writer(&mut encoder, self).map_err(BoxedError::from)?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(BoxedError::from)?;
+        serde_json::from_slice(&decompressed_bytes).map_err(From::from)
+    }
+}
+
 impl StoredObject for WitnessBlockState {
     const BUCKET: Bucket = Bucket::WitnessInput;
     type Key<'a> = L1BatchNumber;
@@ -244,4 +303,41 @@ impl dyn ObjectStore + '_ {
         self.put_raw(V::BUCKET, &key, bytes).await?;
         Ok(key)
     }
+
+    pub fn get_storage_prefix<V: StoredObject>(&self) -> String {
+        self.storage_prefix_raw(V::BUCKET)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_storage_logs_filenames_generate_correctly() {
+        let filename1 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(42),
+            chunk_id: 97,
+        });
+        let filename2 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(3),
+            chunk_id: 531,
+        });
+        let filename3 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(567),
+            chunk_id: 5,
+        });
+        assert_eq!(
+            "snapshot_l1_batch_42_storage_logs_part_0097.json.gzip",
+            filename1
+        );
+        assert_eq!(
+            "snapshot_l1_batch_3_storage_logs_part_0531.json.gzip",
+            filename2
+        );
+        assert_eq!(
+            "snapshot_l1_batch_567_storage_logs_part_0005.json.gzip",
+            filename3
+        );
+    }
+}
diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs
index c68b4cb978f6..72e582deeb2b 100644
--- a/core/lib/object_store/src/raw.rs
+++ b/core/lib/object_store/src/raw.rs
@@ -19,6 +19,7 @@ pub enum Bucket {
     NodeAggregationWitnessJobsFri,
     SchedulerWitnessJobsFri,
     ProofsFri,
+    StorageSnapshot,
 }
 
 impl Bucket {
@@ -34,6 +35,7 @@
             Self::NodeAggregationWitnessJobsFri => "node_aggregation_witness_jobs_fri",
             Self::SchedulerWitnessJobsFri => "scheduler_witness_jobs_fri",
             Self::ProofsFri => "proofs_fri",
+            Self::StorageSnapshot => "storage_logs_snapshots",
         }
     }
 }
@@ -113,6 +115,8
@@ pub trait ObjectStore: fmt::Debug + Send + Sync { /// /// Returns an error if removal fails. async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError>; + + fn storage_prefix_raw(&self, bucket: Bucket) -> String; } #[async_trait] @@ -133,6 +137,10 @@ impl ObjectStore for Arc { async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> { (**self).remove_raw(bucket, key).await } + + fn storage_prefix_raw(&self, bucket: Bucket) -> String { + (**self).storage_prefix_raw(bucket) + } } #[derive(Debug)] diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 4574824b37f8..32a62c4df804 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -44,6 +44,7 @@ pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; pub mod protocol_version; +pub mod snapshots; pub mod storage; pub mod storage_writes_deduplicator; pub mod system_contracts; diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs new file mode 100644 index 000000000000..794480ea550c --- /dev/null +++ b/core/lib/types/src/snapshots.rs @@ -0,0 +1,73 @@ +use serde::{Deserialize, Serialize}; +use zksync_basic_types::{L1BatchNumber, MiniblockNumber}; + +use crate::{commitment::L1BatchWithMetadata, StorageKey, StorageValue}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AllSnapshots { + pub snapshots_l1_batch_numbers: Vec, +} + +// used in dal to fetch certain snapshot data +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotMetadata { + pub l1_batch_number: L1BatchNumber, + pub factory_deps_filepath: String, + pub storage_logs_filepaths: Vec, +} + +//contains all data not contained in factory_deps/storage_logs files to perform restore process +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotHeader { + pub l1_batch_number: L1BatchNumber, + pub miniblock_number: MiniblockNumber, + //ordered by chunk ids + pub storage_logs_chunks: Vec, + pub factory_deps_filepath: String, + pub last_l1_batch_with_metadata: L1BatchWithMetadata, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotStorageLogsChunkMetadata { + pub chunk_id: u64, + // can be either be a file available under http(s) or local filesystem path + pub filepath: String, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotStorageLogsStorageKey { + pub l1_batch_number: L1BatchNumber, + pub chunk_id: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotStorageLogsChunk { + pub storage_logs: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotStorageLog { + pub key: StorageKey, + pub value: StorageValue, + pub l1_batch_number_of_initial_write: L1BatchNumber, + pub enumeration_index: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotFactoryDependencies { + pub factory_deps: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SnapshotFactoryDependency { + pub bytecode: Vec, +} diff --git a/core/lib/web3_decl/src/namespaces/mod.rs b/core/lib/web3_decl/src/namespaces/mod.rs index 26e610c16449..66cff2a6dbb8 100644 --- a/core/lib/web3_decl/src/namespaces/mod.rs +++ 
b/core/lib/web3_decl/src/namespaces/mod.rs
@@ -3,16 +3,19 @@ pub mod en;
 pub mod eth;
 pub mod eth_subscribe;
 pub mod net;
+pub mod snapshots;
 pub mod web3;
 pub mod zks;
 
 #[cfg(feature = "client")]
 pub use self::{
     debug::DebugNamespaceClient, en::EnNamespaceClient, eth::EthNamespaceClient,
-    net::NetNamespaceClient, web3::Web3NamespaceClient, zks::ZksNamespaceClient,
+    net::NetNamespaceClient, snapshots::SnapshotsNamespaceClient, web3::Web3NamespaceClient,
+    zks::ZksNamespaceClient,
 };
 #[cfg(feature = "server")]
 pub use self::{
     debug::DebugNamespaceServer, en::EnNamespaceServer, eth::EthNamespaceServer,
-    net::NetNamespaceServer, web3::Web3NamespaceServer, zks::ZksNamespaceServer,
+    net::NetNamespaceServer, snapshots::SnapshotsNamespaceServer, web3::Web3NamespaceServer,
+    zks::ZksNamespaceServer,
 };
diff --git a/core/lib/web3_decl/src/namespaces/snapshots.rs b/core/lib/web3_decl/src/namespaces/snapshots.rs
new file mode 100644
index 000000000000..02f9aa6b36d2
--- /dev/null
+++ b/core/lib/web3_decl/src/namespaces/snapshots.rs
@@ -0,0 +1,28 @@
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
+
+#[cfg_attr(
+    all(feature = "client", feature = "server"),
+    rpc(server, client, namespace = "snapshots")
+)]
+#[cfg_attr(
+    all(feature = "client", not(feature = "server")),
+    rpc(client, namespace = "snapshots")
+)]
+#[cfg_attr(
+    all(not(feature = "client"), feature = "server"),
+    rpc(server, namespace = "snapshots")
+)]
+pub trait SnapshotsNamespace {
+    #[method(name = "getAllSnapshots")]
+    async fn get_all_snapshots(&self) -> RpcResult<AllSnapshots>;
+
+    #[method(name = "getSnapshot")]
+    async fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> RpcResult<Option<SnapshotHeader>>;
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
index 8fbd3919c26c..72a21e3c2509 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
@@ -4,3 +4,5 @@ pub mod eth;
 pub mod net;
 pub mod web3;
 pub mod zks;
+
+pub mod snapshots;
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
new file mode 100644
index 000000000000..aa542320af2c
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
@@ -0,0 +1,52 @@
+// Built-in uses
+
+// External uses
+use jsonrpc_core::{BoxFuture, Result};
+use jsonrpc_derive::rpc;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
+
+// Workspace uses
+use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error;
+use crate::l1_gas_price::L1GasPriceProvider;
+// Local uses
+use crate::web3::namespaces::SnapshotsNamespace;
+
+#[rpc]
+pub trait SnapshotsNamespaceT {
+    #[rpc(name = "snapshots_getAllSnapshots")]
+    fn get_all_snapshots(&self) -> BoxFuture<Result<AllSnapshots>>;
+
+    #[rpc(name = "snapshots_getSnapshot")]
+    fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> BoxFuture<Result<Option<SnapshotHeader>>>;
+}
+
+impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceT for SnapshotsNamespace<G> {
+    fn get_all_snapshots(&self) -> BoxFuture<Result<AllSnapshots>> {
+        let self_ = self.clone();
+        Box::pin(async move {
+            self_
+                .get_all_snapshots_impl()
+                .await
+                .map_err(into_jsrpc_error)
+        })
+    }
+
+    fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> BoxFuture<Result<Option<SnapshotHeader>>> {
+        let self_ = self.clone();
+        Box::pin(async move {
+            self_
+                .get_snapshot_by_l1_batch_number_impl(l1_batch_number)
+                .await
+                .map_err(into_jsrpc_error)
+        })
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
index 2551b90e824e..3b76771a8cdf 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
@@ -3,5 +3,6 @@ pub mod en;
 pub mod eth;
 pub mod eth_subscribe;
 pub mod net;
+pub mod snapshots;
 pub mod web3;
 pub mod zks;
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
new file mode 100644
index 000000000000..5a60fafa9dc5
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
@@ -0,0 +1,31 @@
+use async_trait::async_trait;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
+use zksync_web3_decl::{jsonrpsee::core::RpcResult, namespaces::SnapshotsNamespaceServer};
+
+use crate::{
+    api_server::web3::{backend_jsonrpsee::into_jsrpc_error, namespaces::SnapshotsNamespace},
+    l1_gas_price::L1GasPriceProvider,
+};
+
+#[async_trait]
+impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceServer
+    for SnapshotsNamespace<G>
+{
+    async fn get_all_snapshots(&self) -> RpcResult<AllSnapshots> {
+        self.get_all_snapshots_impl()
+            .await
+            .map_err(into_jsrpc_error)
+    }
+
+    async fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> RpcResult<Option<SnapshotHeader>> {
+        self.get_snapshot_by_l1_batch_number_impl(l1_batch_number)
+            .await
+            .map_err(into_jsrpc_error)
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 411c04112c90..6ee53088ecff 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -23,7 +23,7 @@ use zksync_web3_decl::{
     },
     namespaces::{
         DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, NetNamespaceServer,
-        Web3NamespaceServer, ZksNamespaceServer,
+        SnapshotsNamespaceServer, Web3NamespaceServer, ZksNamespaceServer,
     },
     types::Filter,
 };
@@ -40,7 +40,8 @@ use self::{
     },
     metrics::API_METRICS,
     namespaces::{
-        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace,
+        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, SnapshotsNamespace, Web3Namespace,
+        ZksNamespace,
     },
     pubsub::{EthSubscribe, PubSubEvent},
     state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber},
@@ -99,20 +100,11 @@ pub enum Namespace {
     Zks,
     En,
     Pubsub,
+    Snapshots,
 }
 
 impl Namespace {
-    pub const ALL: &'static [Namespace] = &[
-        Namespace::Eth,
-        Namespace::Net,
-        Namespace::Web3,
-        Namespace::Debug,
-        Namespace::Zks,
-        Namespace::En,
-        Namespace::Pubsub,
-    ];
-
-    pub const NON_DEBUG: &'static [Namespace] = &[
+    pub const DEFAULT: &'static [Namespace] = &[
         Namespace::Eth,
         Namespace::Net,
         Namespace::Web3,
@@ -343,9 +335,13 @@ impl ApiBuilder {
                 .expect("Can't merge en namespace");
         }
         if namespaces.contains(&Namespace::Debug) {
-            rpc.merge(DebugNamespace::new(rpc_state).await.into_rpc())
+            rpc.merge(DebugNamespace::new(rpc_state.clone()).await.into_rpc())
                 .expect("Can't merge debug namespace");
        }
+        if namespaces.contains(&Namespace::Snapshots) {
+            rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc())
+                .expect("Can't merge snapshots namespace");
+        }
         rpc
     }
 
@@ -358,8 +354,10 @@
         }
 
         if self.namespaces.is_none() {
-            tracing::warn!("debug_ API namespace will be disabled by default in ApiBuilder");
-            self.namespaces = Some(Namespace::NON_DEBUG.to_vec());
+            tracing::warn!(
+                "debug_ and snapshots_ API namespaces will be disabled by default in ApiBuilder"
+            );
+            self.namespaces = Some(Namespace::DEFAULT.to_vec());
         }
 
         if self
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
index 8504717f3b9e..e1b77d381da0 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
@@ -5,10 +5,11 @@ mod debug;
 mod en;
 pub(crate) mod eth;
 mod net;
+mod snapshots;
 mod web3;
 mod zks;
 
 pub use self::{
     debug::DebugNamespace, en::EnNamespace, eth::EthNamespace, net::NetNamespace,
-    web3::Web3Namespace, zks::ZksNamespace,
+    snapshots::SnapshotsNamespace, web3::Web3Namespace, zks::ZksNamespace,
 };
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
new file mode 100644
index 000000000000..02dd3b18b22d
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -0,0 +1,102 @@
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata},
+    L1BatchNumber,
+};
+use zksync_web3_decl::error::Web3Error;
+
+use crate::{
+    api_server::web3::{
+        backend_jsonrpc::error::internal_error, metrics::API_METRICS, state::RpcState,
+    },
+    l1_gas_price::L1GasPriceProvider,
+};
+
+#[derive(Debug)]
+pub struct SnapshotsNamespace<G> {
+    state: RpcState<G>,
+}
+
+impl<G> Clone for SnapshotsNamespace<G> {
+    fn clone(&self) -> Self {
+        Self {
+            state: self.state.clone(),
+        }
+    }
+}
+impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
+    pub fn new(state: RpcState<G>) -> Self {
+        Self { state }
+    }
+
+    pub async fn get_all_snapshots_impl(&self) -> Result<AllSnapshots, Web3Error> {
+        let method_name = "get_all_snapshots";
+        let method_latency = API_METRICS.start_call(method_name);
+        let mut storage_processor = self
+            .state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
+        let mut snapshots_dal = storage_processor.snapshots_dal();
+        let response = snapshots_dal
+            .get_all_snapshots()
+            .await
+            .map_err(|err| internal_error(method_name, err));
+        method_latency.observe();
+        response
+    }
+
+    pub async fn get_snapshot_by_l1_batch_number_impl(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<Option<SnapshotHeader>, Web3Error> {
+        let method_name = "get_snapshot_by_l1_batch_number";
+        let method_latency = API_METRICS.start_call(method_name);
+        let mut storage_processor = self
+            .state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
+        let mut snapshots_dal = storage_processor.snapshots_dal();
+        let snapshot_metadata = snapshots_dal
+            .get_snapshot_metadata(l1_batch_number)
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
+        if let Some(snapshot_metadata) = snapshot_metadata {
+            let snapshot_files = snapshot_metadata.storage_logs_filepaths.clone();
+            let chunks = snapshot_files
+                .iter()
+                .enumerate()
+                .map(|(chunk_id, filepath)| SnapshotStorageLogsChunkMetadata {
+                    chunk_id: chunk_id as u64,
+                    filepath: filepath.clone(),
+                })
+                .collect();
+            let l1_batch_with_metadata = storage_processor
+                .blocks_dal()
.get_l1_batch_metadata(l1_batch_number) + .await + .map_err(|err| internal_error(method_name, err))? + .unwrap(); + let miniblock_number = storage_processor + .blocks_dal() + .get_miniblock_range_of_l1_batch(l1_batch_number) + .await + .map_err(|err| internal_error(method_name, err))? + .unwrap() + .1; + method_latency.observe(); + Ok(Some(SnapshotHeader { + l1_batch_number: snapshot_metadata.l1_batch_number, + miniblock_number, + last_l1_batch_with_metadata: l1_batch_with_metadata, + storage_logs_chunks: chunks, + factory_deps_filepath: snapshot_metadata.factory_deps_filepath, + })) + } else { + method_latency.observe(); + Ok(None) + } + } +} diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 8743330710cb..2f7fad8c6e10 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -132,7 +132,7 @@ async fn spawn_server( .with_threads(1) .with_tx_sender(tx_sender, vm_barrier) .with_pub_sub_events(pub_sub_events_sender) - .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()) + .enable_api_namespaces(Namespace::DEFAULT.to_vec()) .build(stop_receiver) .await .expect("Failed spawning JSON-RPC server"); diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index f9192bf295cb..74192beecb6c 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1129,11 +1129,12 @@ async fn run_http_api( ) .await; - let namespaces = if with_debug_namespace { - Namespace::ALL.to_vec() - } else { - Namespace::NON_DEBUG.to_vec() - }; + let mut namespaces = Namespace::DEFAULT.to_vec(); + if with_debug_namespace { + namespaces.push(Namespace::Debug) + } + namespaces.push(Namespace::Snapshots); + let last_miniblock_pool = ConnectionPool::singleton(postgres_config.replica_url()?) 
.build() .await @@ -1185,6 +1186,9 @@ async fn run_ws_api( .await .context("failed to build last_miniblock_pool")?; + let mut namespaces = Namespace::DEFAULT.to_vec(); + namespaces.push(Namespace::Snapshots); + let mut api_builder = web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool) .ws(api_config.web3_json_rpc.ws_port) @@ -1202,7 +1206,7 @@ async fn run_ws_api( .with_threads(api_config.web3_json_rpc.ws_server_threads()) .with_tree_api(api_config.web3_json_rpc.tree_api_url()) .with_tx_sender(tx_sender, vm_barrier) - .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()); + .enable_api_namespaces(namespaces); if with_logs_request_translator_enabled { api_builder = api_builder.enable_request_translator(); diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index d296db7174f0..37e65991a583 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -10,7 +10,8 @@ "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts", "contract-verification-test": "zk f jest -- api/contract-verification.test.ts", "build": "hardhat compile", - "build-yul": "hardhat run scripts/compile-yul.ts" + "build-yul": "hardhat run scripts/compile-yul.ts", + "snapshots-creator-test": "zk f jest -- api/snapshots-creator.test.ts" }, "devDependencies": { "@matterlabs/hardhat-zksync-deploy": "^0.6.1", diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts new file mode 100644 index 000000000000..1938a53e80a5 --- /dev/null +++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts @@ -0,0 +1,85 @@ +import { TestMaster } from '../../src/index'; +import fs from 'fs'; +import * as zlib from 'zlib'; +import { snapshots_creator } from 'zk/build/run/run'; + +describe('Snapshots API tests', () => { + let testMaster: TestMaster; + + beforeAll(() => { + testMaster = TestMaster.getInstance(__filename); + + if (process.env.ZKSYNC_ENV!.startsWith('ext-node')) { + console.warn("You are trying to run snapshots creator tests on external node. 
It's not supported."); + } + }); + + async function runCreator() { + console.log('Starting creator'); + await snapshots_creator(); + } + + async function rpcRequest(name: string, params: any) { + const response = await testMaster.mainAccount().provider.send(name, params); + console.log(response); + return response; + } + + async function getAllSnapshots() { + return await rpcRequest('snapshots_getAllSnapshots', []); + } + + async function getSnapshot(snapshotL1Batch: number) { + return rpcRequest('snapshots_getSnapshot', [snapshotL1Batch]); + } + + async function decompressGzip(filePath: string): Promise { + return new Promise((resolve, reject) => { + const readStream = fs.createReadStream(filePath); + const gunzip = zlib.createGunzip(); + let data = ''; + + gunzip.on('data', (chunk) => (data += chunk.toString())); + gunzip.on('end', () => resolve(data)); + gunzip.on('error', reject); + + readStream.pipe(gunzip); + }); + } + async function createAndValidateSnapshot() { + const existingBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[]; + await runCreator(); + const newBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[]; + const addedSnapshots = newBatchNumbers.filter((x) => existingBatchNumbers.indexOf(x) === -1); + expect(addedSnapshots.length).toEqual(1); + + const l1BatchNumber = addedSnapshots[0]; + const fullSnapshot = await getSnapshot(l1BatchNumber); + const miniblockNumber = fullSnapshot.miniblockNumber; + + expect(fullSnapshot.l1BatchNumber).toEqual(l1BatchNumber); + for (let chunkMetadata of fullSnapshot.storageLogsChunks) { + console.log(`Verifying ${chunkMetadata.filepath}`); + let path = `${process.env.ZKSYNC_HOME}/${chunkMetadata.filepath}`; + + let output = JSON.parse(await decompressGzip(path)); + expect(output['storageLogs'].length > 0); + + for (const storageLog of output['storageLogs'] as any[]) { + const snapshotAccountAddress = storageLog['key']['account']['address']; + const snapshotKey = storageLog['key']['key']; + const snapshotValue = storageLog['value']; + const snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite']; + const valueOnBlockchain = await testMaster + .mainAccount() + .provider.getStorageAt(snapshotAccountAddress, snapshotKey, miniblockNumber); + expect(snapshotValue).toEqual(valueOnBlockchain); + expect(snapshotL1BatchNumber).toBeLessThanOrEqual(l1BatchNumber); + } + } + } + + test('snapshots can be created', async () => { + await createAndValidateSnapshot(); + }); +}); diff --git a/etc/env/base/object_store.toml b/etc/env/base/object_store.toml index 3ffec9f2ff65..5fd775acb371 100644 --- a/etc/env/base/object_store.toml +++ b/etc/env/base/object_store.toml @@ -18,3 +18,10 @@ mode="FileBacked" file_backed_base_path="artifacts" gcs_credential_file_path="/path/to/gcs_credentials.json" max_retries=5 + +[snapshots_object_store] +bucket_base_url="snapshots_base_url" +mode="FileBacked" +file_backed_base_path="artifacts" +gcs_credential_file_path="/path/to/gcs_credentials.json" +max_retries=5 diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts index 12568cd5851d..b00c8e3e0bef 100644 --- a/infrastructure/zk/src/run/run.ts +++ b/infrastructure/zk/src/run/run.ts @@ -119,6 +119,11 @@ export async function cross_en_checker() { await utils.spawn(`${logLevel} ${suffix}`); } +export async function snapshots_creator() { + process.chdir(`${process.env.ZKSYNC_HOME}`); + let logLevel = 'RUST_LOG=snapshots_creator=debug'; + await utils.spawn(`${logLevel} cargo run --bin 
snapshots_creator --release`); +} export const command = new Command('run').description('run miscellaneous applications').addCommand(dataRestore.command); command.command('test-accounts').description('print ethereum test accounts').action(testAccounts); @@ -188,6 +193,8 @@ command await readVariable(address, contractName, variableName, cmd.file); }); +command.command('snapshots-creator').action(snapshots_creator); + command .command('cross-en-checker') .description('run the cross external nodes checker. See Checker Readme the default run mode and configuration.') diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts index cecc6fb49d80..64e164d62131 100644 --- a/infrastructure/zk/src/test/integration.ts +++ b/infrastructure/zk/src/test/integration.ts @@ -24,6 +24,11 @@ export async function contractVerification(bail: boolean = false) { await utils.spawn('yarn ts-integration contract-verification-test' + flag); } +export async function snapshotsCreator(bail: boolean = false) { + const flag = bail ? ' --bail' : ''; + await utils.spawn('yarn ts-integration snapshots-creator-test' + flag); +} + export async function server(options: string[] = []) { if (process.env.ZKSYNC_ENV?.startsWith('ext-node')) { process.env.ZKSYNC_WEB3_API_URL = `http://127.0.0.1:${process.env.EN_HTTP_PORT}`; @@ -174,6 +179,14 @@ command await contractVerification(cmd.bail); }); +command + .command('snapshots-creator') + .description('run snapshots creator tests') + .option('--bail') + .action(async (cmd: Command) => { + await snapshotsCreator(cmd.bail); + }); + command .command('testkit [options...]') .allowUnknownOption(true)
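For reference, `get_chunk_hashed_keys_range` in `core/bin/snapshots_creator/src/chunking.rs` partitions the 2^256-wide hashed-key space into `chunks_count` inclusive ranges, rounding the stride up and clamping the last range to `U256::MAX`. A minimal property-test sketch of the tiling this guarantees; since the helper lives in the `chunking` module, a real test would sit in that file inside a `#[cfg(test)] mod tests` with `use super::*`:

```rust
use zksync_types::{H256, U256};
use zksync_utils::h256_to_u256;

#[test]
fn chunk_ranges_tile_the_whole_keyspace() {
    for chunks_count in [1, 2, 3, 10, 100] {
        // The first range starts at 0x00..00; the last range ends at 0xff..ff.
        let first = get_chunk_hashed_keys_range(0, chunks_count);
        assert_eq!(*first.start(), H256::zero());
        let last = get_chunk_hashed_keys_range(chunks_count - 1, chunks_count);
        assert_eq!(*last.end(), H256::repeat_byte(0xff));

        // Consecutive ranges adjoin exactly: no gaps and no overlaps.
        for chunk_id in 1..chunks_count {
            let prev = get_chunk_hashed_keys_range(chunk_id - 1, chunks_count);
            let next = get_chunk_hashed_keys_range(chunk_id, chunks_count);
            assert_eq!(
                h256_to_u256(*next.start()),
                h256_to_u256(*prev.end()) + U256::one()
            );
        }
    }
}
```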
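The `StoredObject` impls in `core/lib/object_store/src/objects.rs` store snapshot objects as gzipped JSON. A self-contained sketch of the same round trip with a stand-in payload type (`Payload` below is hypothetical; the real code uses `SnapshotStorageLogsChunk` and `SnapshotFactoryDependencies`):

```rust
use std::io::Read;

use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use serde::{Deserialize, Serialize};

// Stand-in for the snapshot payload types.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Payload {
    storage_logs: Vec<String>,
}

fn main() -> anyhow::Result<()> {
    let payload = Payload {
        storage_logs: vec!["log1".into(), "log2".into()],
    };

    // `serialize`: JSON-encode straight into a gzip encoder.
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    serde_json::to_writer(&mut encoder, &payload)?;
    let bytes = encoder.finish()?;

    // `deserialize`: gunzip, then JSON-decode.
    let mut decoder = GzDecoder::new(&bytes[..]);
    let mut decompressed = Vec::new();
    decoder.read_to_end(&mut decompressed)?;
    let roundtripped: Payload = serde_json::from_slice(&decompressed)?;

    assert_eq!(roundtripped, payload);
    Ok(())
}
```

Streaming the JSON writer directly into the gzip encoder avoids materializing the uncompressed chunk in memory twice, which matters for chunks of up to a million storage logs.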
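A sketch of how `SnapshotsCreatorConfig::from_env` picks up its settings, assuming the standard `envy_load` convention used across `zksync_env_config` (prefix plus upper-cased field name; the exact env-var names below are inferred from that convention, not spelled out in the patch):

```rust
use zksync_config::SnapshotsCreatorConfig;
use zksync_env_config::FromEnv;

fn main() -> anyhow::Result<()> {
    // Only one knob is set here; the other falls back to its serde default.
    std::env::set_var("SNAPSHOTS_CREATOR_STORAGE_LOGS_CHUNK_SIZE", "500000");

    let config = SnapshotsCreatorConfig::from_env()?;
    assert_eq!(config.storage_logs_chunk_size, 500_000);
    // Default from `snapshots_creator_concurrent_queries_count()`.
    assert_eq!(config.concurrent_queries_count, 25);
    Ok(())
}
```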
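Consumers can reach the new `snapshots_` namespace through the client generated by the `#[rpc]` trait in `core/lib/web3_decl/src/namespaces/snapshots.rs` (the method names map to `snapshots_getAllSnapshots` / `snapshots_getSnapshot` on the wire). A hedged sketch, assuming the `client` feature of `zksync_web3_decl` is enabled; the node URL is an assumption:

```rust
use zksync_web3_decl::{
    jsonrpsee::http_client::HttpClientBuilder, namespaces::SnapshotsNamespaceClient,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Point this at any node exposing the `snapshots_` namespace.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:3050")?;

    let all_snapshots = client.get_all_snapshots().await?;
    println!("snapshots: {:?}", all_snapshots.snapshots_l1_batch_numbers);

    // Fetch the header of the newest snapshot, if any exists.
    if let Some(&l1_batch_number) = all_snapshots.snapshots_l1_batch_numbers.last() {
        let header = client.get_snapshot_by_l1_batch_number(l1_batch_number).await?;
        println!("header: {header:?}");
    }
    Ok(())
}
```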